From 1c34702e82ba2b66441bf769704c209f9af127ec Mon Sep 17 00:00:00 2001
From: Wang Peng <798960736@qq.com>
Date: Tue, 30 Jan 2024 21:09:02 +0800
Subject: [PATCH] Update README.md

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 81ae69e..72bd59d 100644
--- a/README.md
+++ b/README.md
@@ -267,6 +267,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers import GenerationConfig
 from qwen_generation_utils import make_context, decode_tokens, get_stop_words_ids
 
+# To generate attention masks automatically, it is necessary to assign distinct
+# token_ids to pad_token and eos_token, and set pad_token_id in the generation_config.
 tokenizer = AutoTokenizer.from_pretrained(
     './',
     pad_token='<|extra_0|>',
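
For reviewers, a minimal sketch of the batch-inference setup the new comment refers to. The hunk's context ends at the pad_token argument, so everything past that line (eos_token='<|endoftext|>', padding_side, device_map, trust_remote_code on the model, and the generate() call) is an assumption based on the usual Qwen batched-generation recipe, not part of this patch:

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained(
    './',
    pad_token='<|extra_0|>',    # a reserved, otherwise-unused Qwen special token
    eos_token='<|endoftext|>',  # assumed: Qwen's end-of-text token
    padding_side='left',        # assumed: left-pad batches for decoder-only generation
    trust_remote_code=True,
)

model = AutoModelForCausalLM.from_pretrained(
    './', device_map='auto', trust_remote_code=True,
).eval()

# transformers only infers an attention mask from input_ids when
# pad_token_id is set *and* differs from eos_token_id; mirroring the
# tokenizer's pad id into the generation config satisfies that.
model.generation_config = GenerationConfig.from_pretrained(
    './', pad_token_id=tokenizer.pad_token_id,
)

# With distinct pad/eos ids, generate() can derive the attention mask
# from input_ids alone, masking every pad_token_id position.
batch = tokenizer(['Hi there', 'Tell me a story'], padding=True, return_tensors='pt')
outputs = model.generate(batch.input_ids.to(model.device))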