xl2533 committed on
Commit
4d8fe4d
·
1 Parent(s): 67270bc
app.py CHANGED
@@ -2,13 +2,34 @@ import gradio as gr
2
  import os
3
  import json
4
  import requests
 
 
 
5
 
6
  # Streaming endpoint
7
  API_URL = "https://api.openai.com/v1/chat/completions" # os.getenv("API_URL") + "/generate_stream"
 
 
8
 
 
 
 
9
 
10
- def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[],
11
- history=[]): # repetition_penalty, top_k
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  payload = {
14
  "model": "gpt-3.5-turbo",
@@ -27,21 +48,35 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
27
  }
28
 
29
  print(f"chat_counter - {chat_counter}")
 
30
  if chat_counter != 0:
31
  messages = []
32
- for data in chatbot:
33
- temp1 = {}
34
- temp1["role"] = "user"
35
- temp1["content"] = data[0]
36
- temp2 = {}
37
- temp2["role"] = "assistant"
38
- temp2["content"] = data[1]
39
- messages.append(temp1)
40
- messages.append(temp2)
41
- temp3 = {}
42
- temp3["role"] = "user"
43
- temp3["content"] = inputs
44
- messages.append(temp3)
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # messages
46
  payload = {
47
  "model": "gpt-3.5-turbo",
@@ -56,11 +91,13 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
56
 
57
  chat_counter += 1
58
 
 
59
  history.append(inputs)
60
  print(f"payload is - {payload}")
61
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
 
 
62
  response = requests.post(API_URL, headers=headers, json=payload, stream=True)
63
- # response = requests.post(API_URL, headers=headers, json=payload, stream=True)
64
  token_counter = 0
65
  partial_words = ""
66
 
@@ -83,36 +120,24 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
83
  history[-1] = partial_words
84
  chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)] # convert to tuples of list
85
  token_counter += 1
86
- yield chat, history, chat_counter # resembles {chatbot: chat, state: history}
87
 
88
 
89
  def reset_textbox():
90
  return gr.update(value='')
91
 
92
 
93
- title = """<h1 align="center">🔥Finance ChatBot 🚀Streaming🚀</h1>"""
94
- description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
95
- ```
96
- User: <utterance>
97
- Assistant: <utterance>
98
- User: <utterance>
99
- Assistant: <utterance>
100
- ...
101
- ```
102
- In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
103
- """
104
-
105
  with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
106
  #chatbot {height: 520px; overflow: auto;}""") as demo:
107
- gr.HTML(title)
108
- gr.HTML(
109
- '''<center><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
110
  with gr.Column(elem_id="col_container"):
111
  openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
112
  chatbot = gr.Chatbot(elem_id='chatbot') # c
113
- inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter") # t
114
- state = gr.State([]) # s
115
- b1 = gr.Button()
 
 
116
 
117
  # inputs, top_p, temperature, top_k, repetition_penalty
118
  with gr.Accordion("Parameters", open=False):
@@ -123,13 +148,15 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
123
  # top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
124
  # repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
125
  chat_counter = gr.Number(value=0, visible=False, precision=0)
 
126
 
127
- inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state],
128
  [chatbot, state, chat_counter], )
129
- b1.click(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state],
130
- [chatbot, state, chat_counter], )
131
- b1.click(reset_textbox, [], [inputs])
 
 
132
  inputs.submit(reset_textbox, [], [inputs])
133
 
134
- # gr.Markdown(description)
135
- demo.queue().launch(debug=True)
 
2
  import os
3
  import json
4
  import requests
5
+ from langchain import FAISS
6
+ from langchain.prompts import PromptTemplate
7
+ from langchain.embeddings import CohereEmbeddings, HuggingFaceInstructEmbeddings
8
 
9
  # Streaming endpoint
10
  API_URL = "https://api.openai.com/v1/chat/completions" # os.getenv("API_URL") + "/generate_stream"
11
+ embedding_key = '5IRbILAbjTI0VcqTsktBfKsr13Lych9iBAFbLpkj'
12
+ faiss_store = './output/数字经济'
13
 
14
+ # load the prompts
15
+ with open("prompts/combine_prompt.txt", "r") as f:
16
+ template = f.read()
17
 
18
+ with open("prompts/combine_prompt_hist.txt", "r") as f:
19
+ template_hist = f.read()
20
+
21
+ with open("prompts/question_prompt.txt", "r") as f:
22
+ template_quest = f.read()
23
+
24
+ with open("prompts/chat_combine_prompt.txt", "r") as f:
25
+ chat_combine_template = f.read()
26
+
27
+ with open("prompts/chat_reduce_prompt.txt", "r") as f:
28
+ chat_reduce_template = f.read()
29
+
30
+
31
+ def predict(inputs, top_p, temperature, openai_api_key, enable_index,
32
+ chat_counter, chatbot=[], history=[]): # repetition_penalty, top_k
33
 
34
  payload = {
35
  "model": "gpt-3.5-turbo",
 
48
  }
49
 
50
  print(f"chat_counter - {chat_counter}")
51
+ # 如果有历史对话,把对话拼接进入上下文
52
  if chat_counter != 0:
53
  messages = []
54
+ if enable_index:
55
+ history = json.loads(history)
56
+ template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}",
57
+ history[1])
58
+ c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp,
59
+ template_format="jinja2")
60
+ else:
61
+ for data in chatbot:
62
+ temp1 = {}
63
+ temp1["role"] = "user"
64
+ temp1["content"] = data[0]
65
+ temp2 = {}
66
+ temp2["role"] = "assistant"
67
+ temp2["content"] = data[1]
68
+ messages.append(temp1)
69
+ messages.append(temp2)
70
+
71
+ # Faiss 检索最近的embedding
72
+ if enable_index:
73
+ docsearch = FAISS.load_local(faiss_store, CohereEmbeddings(cohere_api_key=embedding_key))
74
+ else:
75
+ temp3 = {}
76
+ temp3["role"] = "user"
77
+ temp3["content"] = inputs
78
+ messages.append(temp3)
79
+
80
  # messages
81
  payload = {
82
  "model": "gpt-3.5-turbo",
 
91
 
92
  chat_counter += 1
93
 
94
+ #list of user input
95
  history.append(inputs)
96
  print(f"payload is - {payload}")
97
+ print(f'chatbot - {chatbot}')
98
+ print(f'chatbot - {chatbot}')
99
+
100
  response = requests.post(API_URL, headers=headers, json=payload, stream=True)
 
101
  token_counter = 0
102
  partial_words = ""
103
 
 
120
  history[-1] = partial_words
121
  chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)] # convert to tuples of list
122
  token_counter += 1
123
+ yield chat, history, chat_counter
124
 
125
 
126
  def reset_textbox():
127
  return gr.update(value='')
128
 
129
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
131
  #chatbot {height: 520px; overflow: auto;}""") as demo:
132
+ gr.HTML("""<h1 align="center">🚀Finance ChatBot🚀</h1>""")
 
 
133
  with gr.Column(elem_id="col_container"):
134
  openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
135
  chatbot = gr.Chatbot(elem_id='chatbot') # c
136
+ inputs = gr.Textbox(placeholder="您有什么问题可以问我", label="输入数字经济,两会,硅谷银行相关的问题")
137
+ state = gr.State([])
138
+
139
+ clear = gr.Button("Clear")
140
+ run = gr.Button("Run")
141
 
142
  # inputs, top_p, temperature, top_k, repetition_penalty
143
  with gr.Accordion("Parameters", open=False):
 
148
  # top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
149
  # repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
150
  chat_counter = gr.Number(value=0, visible=False, precision=0)
151
+ enable_index = gr.Checkbox(label='是', info='是否使用研报')
152
 
153
+ inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, enable_index, chat_counter, chatbot, state],
154
  [chatbot, state, chat_counter], )
155
+ run.click(predict, [inputs, top_p, temperature, openai_api_key, enable_index, chat_counter, chatbot, state],
156
+ [chatbot, state, chat_counter], )
157
+
158
+ #每次对话结束都重置对话框
159
+ clear.click(reset_textbox, [], [inputs], queue=False)
160
  inputs.submit(reset_textbox, [], [inputs])
161
 
162
+ demo.queue().launch(debug=True)
 
prompts/chat_combine_prompt.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ You are a DocsGPT, friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
2
+ Use the following pieces of context to help answer the users question.
3
+ ----------------
4
+ {summaries}
prompts/chat_reduce_prompt.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Use the following portion of a long document to see if any of the text is relevant to answer the question.
2
+ {context}
3
+ Provide all relevant text to the question verbatim. Summarize if needed. If nothing relevant return "-".
prompts/combine_prompt.txt ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a DocsGPT, friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
2
+
3
+ QUESTION: How to merge tables in pandas?
4
+ =========
5
+ Content: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
6
+ Source: 28-pl
7
+ Content: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
8
+ Source: 30-pl
9
+ =========
10
+ FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
11
+ SOURCES: 28-pl 30-pl
12
+
13
+ QUESTION: How are you?
14
+ =========
15
+ CONTENT:
16
+ SOURCE:
17
+ =========
18
+ FINAL ANSWER: I am fine, thank you. How are you?
19
+ SOURCES:
20
+
21
+ QUESTION: {{ question }}
22
+ =========
23
+ {{ summaries }}
24
+ =========
25
+ FINAL ANSWER:
prompts/combine_prompt_hist.txt ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a DocsGPT, friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
2
+
3
+ QUESTION: How to merge tables in pandas?
4
+ =========
5
+ Content: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
6
+ Source: 28-pl
7
+ Content: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
8
+ Source: 30-pl
9
+ =========
10
+ FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
11
+ SOURCES: 28-pl 30-pl
12
+
13
+ QUESTION: How are you?
14
+ =========
15
+ CONTENT:
16
+ SOURCE:
17
+ =========
18
+ FINAL ANSWER: I am fine, thank you. How are you?
19
+ SOURCES:
20
+
21
+ QUESTION: {{ historyquestion }}
22
+ =========
23
+ CONTENT:
24
+ SOURCE:
25
+ =========
26
+ FINAL ANSWER: {{ historyanswer }}
27
+ SOURCES:
28
+
29
+ QUESTION: {{ question }}
30
+ =========
31
+ {{ summaries }}
32
+ =========
33
+ FINAL ANSWER:
prompts/question_prompt.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Use the following portion of a long document to see if any of the text is relevant to answer the question.
2
+ {{ context }}
3
+ Question: {{ question }}
4
+ Provide all relevant text to the question verbatim. Summarize if needed. If nothing relevant return "-".