Add Python WebUI & add a simple run shell script

main · wsl-wy committed 2 years ago · parent a30bad0ae6 · commit df25dee167
@@ -0,0 +1,45 @@
#!/usr/bin/env bash
cd "$(dirname "$0")"
thisDir=$(pwd)
function performInstall() {
    set -e
    pushd "$thisDir"
    pip3 install -r requirements.txt
    pip3 install gradio mdtex2html scipy
    if [[ ! -d flash-attention ]]; then
        if ! git clone -b v1.0.8 https://github.com/Dao-AILab/flash-attention; then
            echo "Cloning flash-attention failed, please install it manually."
            return 0
        fi
    fi
    cd flash-attention &&
        pip3 install . &&
        pip3 install csrc/layer_norm &&
        pip3 install csrc/rotary ||
        echo "Installing flash-attention failed, please install it manually."
    popd
}
echo "Starting WebUI..."
if ! python3 web_demo.py; then
    echo "Running the demo failed. Install the deps and try again? (y/n)"
    # Prompt only when attached to an interactive terminal outside Docker; otherwise install automatically.
    if [[ -t 0 ]] && [[ -t 1 ]] && [[ ! -f "/.dockerenv" ]]; then
        read doInstall
    else
        doInstall="y"
    fi
    if ! [[ "$doInstall" =~ y|Y ]]; then
        exit 1
    fi
    echo "Installing deps, then trying again..."
    performInstall && python3 web_demo.py
fi
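For reference, a hedged usage sketch of the run script above. The committed filename is not visible in this view, so run_web_demo.sh below is only a placeholder:

# run_web_demo.sh is a placeholder name; substitute the actual script name from this commit.
chmod +x run_web_demo.sh
./run_web_demo.sh               # in a terminal: prompts before installing deps if the demo fails
./run_web_demo.sh < /dev/null   # non-interactive stdin: installs deps automatically on failure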

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
""" Ref: https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py """
from transformers import AutoTokenizer
import gradio as gr
import mdtex2html
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import sys
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

# When invoked with --exit, quit right after the model has been loaded.
if len(sys.argv) > 1 and sys.argv[1] == "--exit":
    exit(0)

def postprocess(self, y):
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y

# Render chat messages with mdtex2html (Markdown + TeX) instead of gradio's default postprocess.
gr.Chatbot.postprocess = postprocess

def parse_text(text):
    """Copied from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = f'<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    line = line.replace("`", "\\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text

def predict(input, chatbot, history, past_key_values):
    print('Q: ' + parse_text(input))
    chatbot.append((parse_text(input), ""))
    fullResponse = ""
    # Stream partial responses so the chatbot updates while the model is still generating.
    for response in model.chat(tokenizer, input, history=history, stream=True):
        chatbot[-1] = (parse_text(input), parse_text(response))
        yield chatbot, history, past_key_values
        fullResponse = parse_text(response)
    print("A: " + fullResponse)

def reset_user_input():
    return gr.update(value='')

def reset_state():
    return [], [], None

with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">通义千问 - QwenLM/Qwen-7B</h1>""")
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
                    container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")

    history = gr.State([])
    past_key_values = gr.State(None)

    submitBtn.click(predict, [user_input, chatbot, history, past_key_values],
                    [chatbot, history, past_key_values], show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])
    emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)

demo.queue().launch(share=False, inbrowser=True, server_port=80, server_name="0.0.0.0")
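
For reference, a minimal non-UI sketch of the chat API the demo above builds on. It assumes the same Qwen/Qwen-7B-Chat checkpoint and the chat() helper exposed by its remote code; it is a single-turn illustration, not part of this commit:

# Single-turn sketch without Gradio; assumes the same Qwen/Qwen-7B-Chat remote-code chat() helper used by web_demo.py.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

# One question with no prior history; chat() returns the reply text and the updated history.
response, history = model.chat(tokenizer, "Hello! Please introduce yourself.", history=None)
print(response)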