diff --git a/README.md b/README.md index 81ae69e..72bd59d 100644 --- a/README.md +++ b/README.md @@ -267,6 +267,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer from transformers import GenerationConfig from qwen_generation_utils import make_context, decode_tokens, get_stop_words_ids +# To generate attention masks automatically, pad_token and eos_token must be +# assigned distinct token ids, and pad_token_id must be set in generation_config. tokenizer = AutoTokenizer.from_pretrained( './', pad_token='<|extra_0|>',