krrishD committed on
Commit aadc18f · 1 Parent(s): 230b925

Update app.py

Files changed (1): app.py +42 -12
app.py CHANGED
@@ -73,14 +73,23 @@ Follow up: Could you try to increase the shared memory and try setting num_worke
  Intermediate Answer: It worked! Can you explain what happened here?
  So the final answer is: The error usually means that your system doesn’t provide enough shared memory for multiple workers (used via num_workers>0). Check the shared memory limitation of your system and try to increase it.
 
-
  StackTrace:
 
- RuntimeError: size mismatch (got input: [100000], target: [1000])
+ Traceback (most recent call last):
+ File "main.py", line 39, in <module>
+ request = create_request(page)
+ File "main.py", line 15, in create_request
+ url = base_url + data
+ TypeError: can only concatenate str (not "bytes") to str
 
- Question: Any idea how I can solve this problem?
- Are follow up questions needed here: No
- So the final answer is: I don’t know which line of code creates the shape mismatch but would guess it’s raised in the loss calculation: loss = criterion(output.view(-1), batch['input_ids'].view(-1).to(device)). Print the shapes of both tensors and make sure they are expected in the used criterion. PS you can post code snippets by wrapping them into three backticks ```, which would make your code easier to read.
+
+ Question: How do I fix this?
+ Are follow up questions needed here: Yes
+ Follow up: Could you try to decode the data before passing it to the url?
+ Intermediate Answer: Yes, it made the data a string and worked!
+ So the final answer is: You can try to decode the data before passing it to the url like this:
+
+ data = urllib.parse.unquote(data)
 
  StackTrace: ''',
  '''
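Editor's note on the new few-shot example above: the TypeError it demonstrates comes from concatenating bytes to a str. Below is a minimal, self-contained sketch of the same failure and one common fix; the values of base_url and data are illustrative, and decoding with .decode() is just one option alongside the urllib.parse.unquote(data) call the example suggests.

```python
# Sketch of the failure the new few-shot example is built around (hypothetical values).
base_url = "https://example.com/?q="
data = b"hello%20world"  # bytes, e.g. read from a socket or urlopen().read()

try:
    url = base_url + data  # str + bytes
except TypeError as exc:
    print(exc)  # can only concatenate str (not "bytes") to str

# One common fix: turn the bytes into a str before concatenating.
url = base_url + data.decode("utf-8")
print(url)
```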
@@ -163,16 +172,37 @@ def initial_query_builder(language, code, question, intermediate = "\nIntermedia
     cur_prompt = prompt[0] + language + prompt[1] + code + prompt[2] + question + prompt[3]
 
     # print("prompt: ", cur_prompt, end ='')
-
-    ret_text = call_gpt(cur_prompt, intermediate)
-    print("ret_text: ", ret_text)
-    print("get_last_line(ret_text): ", get_last_line(ret_text))
+    ## check if follow up in the query, if not, make sure it contains the final answer. otherwise re-run until at least one of the 2 is in the response. break after 3 attempts.
+    attempts = 0
+    ret_text = ''
+    while followup not in ret_text and finalans not in ret_text:
+        attempts +=1
+        ret_text = call_gpt(cur_prompt, intermediate)
+        print(str(attempts) + " ret_text:", ret_text)
+        if attempts == 3:
+            break
+    if "final answer is" in ret_text:
+        updated_prompt = cur_prompt + re.findall(r".*?(?=is:)", ret_text)[0] + " is: Let's think step-by-step. "
+        ret_text = call_gpt(updated_prompt, intermediate)
     return ret_text
 
 def subsequent_query_builder(curr_prompt, external_answer, intermediate = "\nIntermediate Answer:", followup = "\nFollow up:", finalans= '\nSo the final answer is:'):
+    print("curr_prompt: ", curr_prompt)
     curr_prompt += intermediate + ' ' + external_answer + '.'
-    print(intermediate + ' ' + yellowfy(external_answer) + '.', end='' )
-    ret_text = call_gpt(curr_prompt, intermediate)
+    ## check if follow up in the query, if not, make sure it contains the final answer. otherwise re-run until at least one of the 2 is in the response. break after 3 attempts.
+    attempts = 0
+    ret_text = ''
+    while followup not in ret_text and finalans not in ret_text:
+        attempts +=1
+        ret_text = call_gpt(curr_prompt, intermediate)
+        print("subsequent query " + str(attempts) + " ret_text:", ret_text)
+        if attempts == 3:
+            break
+    print("ret_text: ", ret_text)
+    if "final answer is" in ret_text:
+        updated_prompt = curr_prompt + re.findall(r".*?(?=is:)", ret_text)[0] + " is: Let's think step-by-step. "
+        # print("updated_prompt: ", updated_prompt)
+        ret_text = call_gpt(updated_prompt, intermediate)
     return ret_text
 
 """subsequent query builder:
@@ -196,7 +226,7 @@ def prompt_builder(history, intermediate = "\nIntermediate Answer:", followup =
     curr_prompt = prompt[0] + language + prompt[1] + stacktrace + prompt[2] + question + prompt[3]
 
     #set subsequent conversation thread
-    if len(history) > 2: #subsequent conversations have occurred
+    if len(history) >= 2: #subsequent conversations have occurred
         curr_prompt += history[1][1] ## get the first response to the stacktrace prompt
         for conversation in history[2:]:
             #grab intermediate answer
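Editor's note: this last hunk only moves the boundary. Assuming history is a list of [user, bot] pairs (inferred from the history[1][1] indexing, not stated in the diff), the subsequent-conversation branch now also runs when exactly two entries exist, whereas > 2 skipped that case. A tiny illustration with hypothetical history contents:

```python
# Hypothetical two-entry chat history ([user, bot] pairs assumed).
history = [
    ["<first user turn>", "<first bot turn>"],
    ["<second user turn>", "<first response to the stacktrace prompt>"],  # history[1][1]
]

print(len(history) > 2)   # False: the old check skipped this branch with exactly two entries
print(len(history) >= 2)  # True: the new check appends history[1][1] to the prompt
```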
 