lmw41 committed on
Commit
9cb5d44
·
1 Parent(s): 25db72d

feat: add app.py and images

Browse files
Files changed (6) hide show
  1. .DS_Store +0 -0
  2. app.py +89 -0
  3. lamini.png +0 -0
  4. man.png +0 -0
  5. robot.png +0 -0
  6. woman.png +0 -0
.DS_Store ADDED
Binary file (6.15 kB). View file
 
app.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from huggingface_hub import InferenceClient
3
+ from langchain import HuggingFaceHub
4
+ import requests
5
+ # Internal usage
6
+ import os
7
+ from dotenv import load_dotenv
8
+ from time import sleep
9
load_dotenv()

# Hugging Face API token, read from the environment (.env files supported
# via python-dotenv above).
HUGGINGFACEHUB_API_TOKEN = os.getenv('HF_TOKEN')

# AVATARS shown next to chat messages. A single emoji such as "🧑‍💻",
# "🤖", or "🦖" also works here; shortcodes are not supported.
av_us = './man.png'
av_ass = './robot.png'
18
# FUNCTION TO LOG ALL CHAT MESSAGES INTO chathistory.txt
def writehistory(text):
    """Append *text* plus a trailing newline to ./chathistory.txt.

    The file is opened in append mode with explicit UTF-8 encoding so the
    app's non-ASCII content (Chinese chat messages) logs correctly on any
    platform, instead of depending on the locale's default encoding.
    """
    # 'with' closes the file automatically; the original additionally
    # called f.close() inside the with-block, which was redundant.
    with open('chathistory.txt', 'a', encoding='utf-8') as f:
        f.write(text)
        f.write('\n')
24
repo = "HuggingFaceH4/starchat-beta"

### START STREAMLIT UI
st.title("🤗 AI 聊天機器人 測試版")
st.subheader("支援中文對話")

# Pick the default model once per session; setdefault leaves an existing
# value untouched on Streamlit reruns.
st.session_state.setdefault("hf_model", "HuggingFaceH4/starchat-beta")
35
### INITIALIZING STARCHAT FUNCTION MODEL
def starchat(model, myprompt, your_template):
    """Run *myprompt* through *your_template* against the given HF Hub model.

    Returns the generated text truncated at the first '<|end|>' marker
    (everything before it; the whole reply if the marker is absent).
    """
    from langchain import PromptTemplate, LLMChain
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
    # Sampling configuration for the hosted-inference call.
    generation_kwargs = {
        "min_length": 30,
        "max_new_tokens": 1024,
        "do_sample": True,
        "temperature": 0.2,
        "top_k": 50,
        "top_p": 0.95,
        "eos_token_id": 49155,
    }
    llm = HuggingFaceHub(repo_id=model, model_kwargs=generation_kwargs)
    prompt = PromptTemplate(template=your_template, input_variables=["myprompt"])
    chain = LLMChain(prompt=prompt, llm=llm)
    raw_reply = chain.run(myprompt)
    # Keep only the text before the end-of-turn marker.
    return raw_reply.partition('<|end|>')[0]
50
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun, with the matching
# user/assistant avatar for each role.
for message in st.session_state.messages:
    if message["role"] == "user":
        with st.chat_message(message["role"], avatar=av_us):
            st.markdown(message["content"])
    else:
        with st.chat_message(message["role"], avatar=av_ass):
            st.markdown(message["content"])

# Accept user input
if myprompt := st.chat_input("請介紹台灣"):
    # Add user message to chat history, render it, and log it to disk.
    st.session_state.messages.append({"role": "user", "content": myprompt})
    with st.chat_message("user", avatar=av_us):
        st.markdown(myprompt)
        usertext = f"user: {myprompt}"
        writehistory(usertext)
    # Display assistant response in chat message container.
    # FIX: pass avatar=av_ass here too — the original omitted it only for
    # the live reply, so the robot avatar appeared in replayed history but
    # not while the answer was being streamed.
    with st.chat_message("assistant", avatar=av_ass):
        message_placeholder = st.empty()
        full_response = ""
        res = starchat(
            st.session_state["hf_model"],
            myprompt,
            "<|system|>\n<|end|>\n<|user|>\n{myprompt}<|end|>\n<|assistant|>")
        # Simulate token streaming by revealing one word at a time,
        # with a cursor glyph while text is still arriving.
        response = res.split(" ")
        for r in response:
            full_response = full_response + r + " "
            message_placeholder.markdown(full_response + "▌")
            sleep(0.1)
        message_placeholder.markdown(full_response)
        asstext = f"assistant: {full_response}"
        writehistory(asstext)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
lamini.png ADDED
man.png ADDED
robot.png ADDED
woman.png ADDED