From a145875018b1165185b1b954d08d1ab2abfcbcda Mon Sep 17 00:00:00 2001
From: "Keming (Luke) Lu" <42954814+Lukeming-tsinghua@users.noreply.github.com>
Date: Tue, 12 Sep 2023 11:25:59 +0800
Subject: [PATCH 1/2] Update README.md
---
README.md | 35 +++++++++++++----------------------
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/README.md b/README.md
index 27e8b05..2210223 100644
--- a/README.md
+++ b/README.md
@@ -180,28 +180,19 @@ print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
ModelScope is an opensource platform for Model-as-a-Service (MaaS), which provides flexible and cost-effective model service to AI developers. Similarly, you can run the models with ModelScope as shown below:
```python
-import os
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-from modelscope import snapshot_download
-
-model_id = 'QWen/qwen-7b-chat'
-revision = 'v1.0.0'
-
-model_dir = snapshot_download(model_id, revision)
-
-pipe = pipeline(
-task=Tasks.chat, model=model_dir, device_map='auto')
-history = None
-
-text = '浙江的省会在哪里?'
-results = pipe(text, history=history)
-response, history = results['response'], results['history']
-print(f'Response: {response}')
-text = '它有什么好玩的地方呢?'
-results = pipe(text, history=history)
-response, history = results['response'], results['history']
-print(f'Response: {response}')
+from modelscope import AutoModelForCausalLM, AutoTokenizer
+from modelscope import GenerationConfig
+
+tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen-7B-Chat", revision = 'v1.0.5',trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("qwen/Qwen-7B-Chat", revision = 'v1.0.5',device_map="auto", trust_remote_code=True,fp16 = True).eval()
+model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat",revision = 'v1.0.5', trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
+
+response, history = model.chat(tokenizer, "你好", history=None)
+print(response)
+response, history = model.chat(tokenizer, "浙江的省会在哪里?", history=history)
+print(response)
+response, history = model.chat(tokenizer, "它有什么好玩的景点", history=history)
+print(response)
```
From 861086b66df9b04c4e5d4ae3aea8b530b6ab190c Mon Sep 17 00:00:00 2001
From: Yang An
Date: Tue, 12 Sep 2023 11:29:32 +0800
Subject: [PATCH 2/2] Update README.md
---
README.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 2210223..757ab30 100644
--- a/README.md
+++ b/README.md
@@ -183,9 +183,9 @@ ModelScope is an opensource platform for Model-as-a-Service (MaaS), which provid
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig
-tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen-7B-Chat", revision = 'v1.0.5',trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("qwen/Qwen-7B-Chat", revision = 'v1.0.5',device_map="auto", trust_remote_code=True,fp16 = True).eval()
-model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat",revision = 'v1.0.5', trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
+tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen-7B-Chat", revision='v1.0.5', trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("qwen/Qwen-7B-Chat", revision='v1.0.5', device_map="auto", trust_remote_code=True, fp16=True).eval()
+model.generation_config = GenerationConfig.from_pretrained("qwen/Qwen-7B-Chat", revision='v1.0.5', trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
response, history = model.chat(tokenizer, "你好", history=None)
print(response)