Omnibus committed
Commit b5b79c5 · verified · parent: 2dd48ae

Update app.py

Files changed (1):
app.py +37 -1
app.py CHANGED
@@ -125,6 +125,42 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
+def run_gpt_no_prefix(
+    prompt_template,
+    stop_tokens,
+    max_tokens,
+    seed,
+    **prompt_kwargs,
+):
+    print(seed)
+    timestamp=datetime.datetime.now()
+
+    generate_kwargs = dict(
+        temperature=0.9,
+        max_new_tokens=max_tokens,
+        top_p=0.95,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=seed,
+    )
+
+    content = prompt_template.format(**prompt_kwargs)
+    #if VERBOSE:
+    print(LOG_PROMPT.format(content))
+
+
+    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    #formatted_prompt = format_prompt(f'{content}', history)
+
+    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
+        #yield resp
+
+    if VERBOSE:
+        print(LOG_RESPONSE.format(resp))
+    return resp
 
 
 def run_gpt(
@@ -266,7 +302,7 @@ def compress_data_og(c, instruct, history):
 def get_chart(inp):
     seed=random.randint(1,1000000000)
     try:
-        resp = run_gpt(
+        resp = run_gpt_no_prefix(
             GET_CHART,
             stop_tokens=["observation:", "task:", "action:", "thought:"],
             max_tokens=8192,
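
For reference, a minimal usage sketch of the new helper, showing how **prompt_kwargs fills the template. It assumes run_gpt_no_prefix and the globals it reads (client, VERBOSE, LOG_PROMPT, LOG_RESPONSE) are in scope from app.py; the GET_CHART template and its {inp} placeholder below are illustrative stand-ins, not the actual constants from this repo:

    import random

    GET_CHART = "Turn the following data into a chart spec:\n{inp}"  # assumed stand-in template

    resp = run_gpt_no_prefix(
        GET_CHART,
        stop_tokens=["observation:", "task:", "action:", "thought:"],  # accepted, but not forwarded to text_generation in this version
        max_tokens=8192,
        seed=random.randint(1, 1000000000),
        inp="month,sales\nJan,10\nFeb,12",  # filled into the template via **prompt_kwargs
    )
    print(resp)

The function name and the commented-out format_prompt calls suggest the point of the change: whereas run_gpt presumably routes the prompt through format_prompt (which wraps messages in [INST] ... [/INST]), run_gpt_no_prefix streams prompt_template.format(**prompt_kwargs) to client.text_generation unchanged and concatenates the streamed tokens into resp, and get_chart now calls this un-prefixed variant.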