import streamlit as st
from streamlit_chat import message  # imported by the original demo, unused below
from fastllm_pytools import llm
import sys

st.set_page_config(
    page_title="fastllm web demo",
    page_icon=":robot:"
)

@st.cache_resource
def get_model():
    # Load the fastllm model from the path given on the command line.
    model = llm.model(sys.argv[1])
    return model

# Keep the (prompt, response) history across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far.
for i, (prompt, response) in enumerate(st.session_state.messages):
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        st.markdown(response)

if prompt := st.chat_input("请开始对话"):  # placeholder text: "Please start the conversation"
    model = get_model()
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream the reply chunk by chunk, showing a cursor while generating.
        for chunk in model.stream_response(prompt, st.session_state.messages, one_by_one=True):
            full_response += chunk
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    st.session_state.messages.append((prompt, full_response))
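# Usage note (a sketch, not taken from this file): the model path is read from
# sys.argv[1], so the demo would presumably be launched by passing the path to a
# converted fastllm model as a script argument. Streamlit forwards arguments that
# appear after "--" to the script itself; "model.flm" here is a hypothetical file name:
#
#     streamlit run web_demo.py -- model.flm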