From c6bcf1f326a6a70851529f9d763371060a139acc Mon Sep 17 00:00:00 2001
From: Wang Peng <36780733+logicwong@users.noreply.github.com>
Date: Thu, 3 Aug 2023 23:08:59 +0800
Subject: [PATCH] Update README_CN.md

---
 README_CN.md | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/README_CN.md b/README_CN.md
index 59b5663..65a43ef 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -82,6 +82,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
+## Use bf16 precision
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
+## Use fp16 precision
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
+# fp32 precision is used by default
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
 model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) # You can specify different generation lengths, top_p, and other hyperparameters
 
@@ -115,6 +120,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
+## Use bf16 precision
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, bf16=True).eval()
+## Use fp16 precision
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, fp16=True).eval()
+# fp32 precision is used by default
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True).eval()
 model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True) # You can specify different generation lengths, top_p, and other hyperparameters
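
Not part of the patch itself, but for context: a minimal sketch of how the patched Qwen-7B-Chat snippet reads end to end once one of the commented-out precision lines is enabled. It assumes the model is loaded with trust_remote_code=True (the bf16/fp16 flags and the chat() helper come from Qwen's remote modeling code, not from stock transformers); the prompt string is arbitrary.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

# Load in bf16, one of the three precision options this patch documents;
# use fp16=True instead for fp16, or drop the flag for the fp32 default.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
    "Qwen/Qwen-7B-Chat", trust_remote_code=True
)

# chat() is provided by Qwen's remote code; the prompt here is arbitrary.
response, history = model.chat(tokenizer, "Hello, who are you?", history=None)
print(response)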