From f2d5336addc0735ce9640e526dabd43175d14d4a Mon Sep 17 00:00:00 2001 From: Junyang Lin Date: Fri, 4 Aug 2023 15:39:35 +0800 Subject: [PATCH] add notes for tokenizer --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 41218e7..3750768 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,9 @@ To use Qwen-7B-Chat for the inference, all you need to do is to input a few line from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig +# Note: by default, our tokenizer rejects special tokens in the input text to guard against injection attacks, so passing special tokens such as <|endoftext|> will raise an error. +# To disable this safeguard, pass the `allowed_special` keyword argument, which accepts the string "all" or a `set` of special tokens. +# For example: tokens = tokenizer(text, allowed_special="all") tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) ## use bf16 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()