Update README.md

main
Wang Peng authored 12 months ago, committed by Ren Xuancheng
parent 99b13b4fd2
commit 1c34702e82

@@ -267,6 +267,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers import GenerationConfig
 from qwen_generation_utils import make_context, decode_tokens, get_stop_words_ids
+# To generate attention masks automatically, it is necessary to assign distinct
+# token_ids to pad_token and eos_token, and set pad_token_id in the generation_config.
 tokenizer = AutoTokenizer.from_pretrained(
     './',
     pad_token='<|extra_0|>',
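
For context, the added comment concerns Qwen's batched-inference setup. Below is a minimal sketch of the configuration it describes, not the verbatim README continuation; the eos_token value, padding_side, and model-loading arguments are assumptions based on standard transformers usage.

# Sketch (assumptions noted inline): give pad_token and eos_token distinct ids
# so the attention mask can be inferred from padding, and propagate
# pad_token_id into the generation config.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained(
    './',
    pad_token='<|extra_0|>',    # repurpose an unused special token for padding
    eos_token='<|endoftext|>',  # assumed end-of-text token, distinct from pad
    padding_side='left',        # left padding suits decoder-only batch generation
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    './', device_map='auto', trust_remote_code=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
    './',
    pad_token_id=tokenizer.pad_token_id,  # lets generate() build attention masks
)

With this in place, batch-encoding prompts with padding=True yields input_ids together with an attention_mask that model.generate() can consume directly.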
