From c612df154a2c39d09af8a8b1fa97a65d8ec512e5 Mon Sep 17 00:00:00 2001
From: Junyang Lin <justinlin930319@hotmail.com>
Date: Tue, 8 Aug 2023 21:17:04 +0800
Subject: [PATCH] Update web_demo.py

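Reformat the script (double-quoted strings, multi-line calls), rename the
predict() argument from input to query, fix reset_state() to clear
task_history in place instead of rebinding a local, add a regenerate()
helper (not yet wired into the UI, since chat_stream does not support
sampling), and rework the Gradio layout with a logo, bilingual headers,
paired Clear/Submit buttons, and a license notice.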
---
 web_demo.py | 76 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 51 insertions(+), 25 deletions(-)

diff --git a/web_demo.py b/web_demo.py
index dd52bd3..66ce079 100755
--- a/web_demo.py
+++ b/web_demo.py
@@ -19,25 +19,33 @@ parser.add_argument("--model_revision", type=str, default="")
 args = parser.parse_args(sys.argv[1:])
 print("Args:" + str(args))
 
-tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True, resume_download=True)
+tokenizer = AutoTokenizer.from_pretrained(
+    "Qwen/Qwen-7B-Chat", trust_remote_code=True, resume_download=True
+)
 
 model = AutoModelForCausalLM.from_pretrained(
     "Qwen/Qwen-7B-Chat",
     device_map="auto",
     trust_remote_code=True,
     resume_download=True,
-    **{"revision": args.model_revision} if args.model_revision is not None and args.model_revision != "" and args.model_revision != "None" else {},
+    **{"revision": args.model_revision}
+    if args.model_revision is not None
+    and args.model_revision != ""
+    and args.model_revision != "None"
+    else {},
 ).eval()
 
-model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True, resume_download=True)
+model.generation_config = GenerationConfig.from_pretrained(
+    "Qwen/Qwen-7B-Chat", trust_remote_code=True, resume_download=True
+)
 
-if 'exit' in args:
+if "exit" in args:
     if args.exit:
         sys.exit(0)
     else:
         del args.exit
 
-if 'model_revision' in args:
+if "model_revision" in args:
     del args.model_revision
 
 
@@ -62,11 +70,11 @@ def parse_text(text):
     for i, line in enumerate(lines):
         if "```" in line:
             count += 1
-            items = line.split('`')
+            items = line.split("`")
             if count % 2 == 1:
                 lines[i] = f'<pre><code class="language-{items[-1]}">'
             else:
-                lines[i] = f'<br></code></pre>'
+                lines[i] = "<br></code></pre>"
         else:
             if i > 0:
                 if count % 2 == 1:
@@ -90,46 +98,64 @@ def parse_text(text):
 task_history = []
 
 
-def predict(input, chatbot):
-    print('Q: ' + parse_text(input))
-    chatbot.append((parse_text(input), ""))
+def predict(query, chatbot):
+    print("User: " + parse_text(query))
+    chatbot.append((parse_text(query), ""))
     fullResponse = ""
 
-    for response in model.chat_stream(tokenizer, input, history=task_history):
-        chatbot[-1] = (parse_text(input), parse_text(response))
+    for response in model.chat_stream(tokenizer, query, history=task_history):
+        chatbot[-1] = (parse_text(query), parse_text(response))
 
         yield chatbot
         fullResponse = parse_text(response)
 
-    task_history.append((input, fullResponse))
-    print("A: " + parse_text(fullResponse))
+    task_history.append((query, fullResponse))
+    print("Qwen-7B-Chat: " + parse_text(fullResponse))
+
+
+# chat_stream does not yet support sampling, so regenerate does not work for now.
+def regenerate(chatbot):
+    if not task_history:
+        yield chatbot
+        return
+    item = task_history.pop(-1)
+    chatbot.pop(-1)
+    yield from predict(item[0], chatbot)
 
 
 def reset_user_input():
-    return gr.update(value='')
+    return gr.update(value="")
 
 
 def reset_state():
-    task_history = []
+    task_history.clear()
     return []
 
 
 with gr.Blocks() as demo:
-    gr.HTML("""<h1 align="center">通义千问 - QwenLM/Qwen-7B</h1>""")
+    gr.Markdown("""<p align="center"><img src="https://modelscope.cn/api/v1/models/qwen/Qwen-7B-Chat/repo?Revision=master&FilePath=assets/logo.jpeg&View=true" style="height: 80px"/><p>""")
+    gr.Markdown("""<center><font size=8>Qwen-7B-Chat Bot</center>""")
+    gr.Markdown(
+        """<center><font size=3>This WebUI is based on Qwen-7B-Chat, developed by Alibaba Cloud. (本WebUI基于Qwen-7B-Chat打造,实现聊天机器人功能。)</center>"""
+    )
+    gr.Markdown(
+        """<center><font size=4>Qwen-7B <a href="https://modelscope.cn/models/qwen/Qwen-7B/summary">🤖 <a> | <a href="https://huggingface.co/Qwen/Qwen-7B">🤗</a>&nbsp | Qwen-7B-Chat <a href="https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary">🤖 <a>| <a href="https://huggingface.co/Qwen/Qwen-7B-Chat">🤗</a>&nbsp | &nbsp<a href="https://github.com/QwenLM/Qwen-7B/blob/main/tech_memo.md">Report</a></center>"""
+    )
+
+    chatbot = gr.Chatbot(label="Qwen-7B-Chat", elem_classes="control-height")
+    query = gr.Textbox(lines=2, label="Input")
 
-    chatbot = gr.Chatbot()
     with gr.Row():
-        with gr.Column(scale=4):
-            with gr.Column(scale=12):
-                query = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(container=False)
-            with gr.Column(min_width=32, scale=1):
-                submitBtn = gr.Button("Submit", variant="primary")
-        with gr.Column(scale=1):
-            emptyBtn = gr.Button("Clear History")
+        emptyBtn = gr.Button("🧹 Clear History (清除历史对话)")
+        submitBtn = gr.Button("🚀 Submit (发送)")
 
     submitBtn.click(predict, [query, chatbot], [chatbot], show_progress=True)
     submitBtn.click(reset_user_input, [], [query])
     emptyBtn.click(reset_state, outputs=[chatbot], show_progress=True)
+
+    gr.Markdown(
+        """<font size=2>Note: This demo is governed by the original license of Qwen-7B. We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, including hate speech, violence, pornography, deception, etc. (注:本演示受Qwen-7B的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)"""
+    )
 
 if len(sys.argv) > 1:
     demo.queue().launch(**vars(args))