diff --git a/README_CN.md b/README_CN.md
index 65a43ef..f2c4e01 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -82,9 +82,9 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
-## Use bf16 precision
+## Enable bf16 precision; recommended on GPUs such as A100, H100, RTX3060, RTX3070 to save GPU memory
 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
-## Use fp16 precision
+## Enable fp16 precision; recommended on GPUs such as V100, P100, T4 to save GPU memory
 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
 # Use fp32 precision by default
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
@@ -120,9 +120,9 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
-## Use bf16 precision
+## Enable bf16 precision; recommended on GPUs such as A100, H100, RTX3060, RTX3070 to save GPU memory
 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, bf16=True).eval()
-## Use fp16 precision
+## Enable fp16 precision; recommended on GPUs such as V100, P100, T4 to save GPU memory
 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, fp16=True).eval()
 # Use fp32 precision by default
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True).eval()
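
Note (not part of the patch): a minimal sketch of how the precision flag could be
picked automatically instead of commenting lines in and out, assuming PyTorch's
torch.cuda.is_bf16_supported() helper. The bf16/fp16 keyword arguments come from
the README code above; the detection logic itself is an illustration, not Qwen's
documented API.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen-7B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# Prefer bf16 on GPUs that support it (e.g. A100, H100, RTX3060/3070),
# fall back to fp16 on older cards (e.g. V100, P100, T4), else keep fp32.
if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
    precision_kwargs = {"bf16": True}
elif torch.cuda.is_available():
    precision_kwargs = {"fp16": True}
else:
    precision_kwargs = {}

model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", trust_remote_code=True, **precision_kwargs
).eval()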