tamas.kiss committed · Commit d56b068 · Parent(s): 245d66c

Update logs
app.py
CHANGED
````diff
@@ -258,8 +258,6 @@ def text_to_text_generation(verbose, prompt):
     )
     response = response[len(is_kubectl_prompt) :]
 
-    print(f'{" Query Start ":-^40}')
-    print("Classified as: " + response)
 
     response_num = 0 if "0" in response else (1 if "1" in response else 2)
 
@@ -310,11 +308,17 @@ def text_to_text_generation(verbose, prompt):
         return (prompt, answer[start:end].strip())
 
     modes = ["Kubectl command", "Kubernetes definition", "Normal"]
+
+    print(f'{" Query Start ":-^40}')
+    print("Classified as: " + modes[response_num])
+
     modes[response_num] = f"**{modes[response_num]}**"
     modes = " / ".join(modes)
 
+
     if response_num == 2:
         prompt = create_generation_prompt(response_num, prompt, False)
+        print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
         if verbose:
@@ -324,6 +328,7 @@ def text_to_text_generation(verbose, prompt):
 
     if response_num == 0:
         prompt = create_generation_prompt(response_num, prompt, False)
+        print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
         model_response = new[len(original):].strip()
@@ -341,8 +346,11 @@ def text_to_text_generation(verbose, prompt):
         return f"{modes}\n\n" f"# Answer:\n" f"```bash\n{str_to_md(response)}\n```\n"
 
     res_prompt = create_generation_prompt(response_num, prompt, False)
+    print(f'Prompt given to finetuned model:\n{res_google_search_prompt}\n')
     res_semantic_search_prompt = create_generation_prompt(response_num, prompt, "semantic_search")
+    print(f'Prompt given to model with RAG:\n{res_semantic_search_prompt}\n')
     res_google_search_prompt = create_generation_prompt(response_num, prompt, "google_search")
+    print(f'Prompt given to model with Google search:\n{res_google_search_prompt}\n')
 
     gen_normal, gen_semantic_search, gen_google_search = generate_batch(
         res_prompt, res_semantic_search_prompt, res_google_search_prompt
````
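For reference, the net effect of the first two hunks: the " Query Start " banner and the classification log move from just after the raw classifier response (old lines 261-262, where the log printed the model's raw output) to just after `modes` is defined (new lines 312-313), so the log now shows the human-readable mode label. A minimal, self-contained sketch of the pattern the new code settles on; the `response` value here is a stand-in for the classifier output:

```python
# Stand-in for the raw classifier output parsed at new line 262.
response = "0"

# Same fall-through as the diff: "0" -> kubectl, "1" -> definition, else normal.
response_num = 0 if "0" in response else (1 if "1" in response else 2)

modes = ["Kubectl command", "Kubernetes definition", "Normal"]

# ":-^40" centers the text in a 40-character field, padding both sides with "-".
print(f'{" Query Start ":-^40}')
print("Classified as: " + modes[response_num])

# The labels are then re-joined for display, with the chosen mode bolded.
modes[response_num] = f"**{modes[response_num]}**"
print(" / ".join(modes))
```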
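The remaining additions all follow one pattern: print the exact prompt immediately before it is handed to `generate_batch`. A small runnable sketch of that flow; `generate_batch` here is a hypothetical echo stub standing in for the app's model call, which per the diff returns `(original, new)` pairs, one per prompt:

```python
# Hypothetical stub: the real generate_batch in app.py runs the model and
# returns (original, new) pairs; this one just appends a canned answer.
def generate_batch(*prompts):
    return [(p, p + " kubectl get pods --all-namespaces") for p in prompts]

prompt = "Q: How do I list every pod in the cluster?\nA:"

print('Prompt given to model:\n' + prompt + '\n')  # the log line this commit adds
original, new = generate_batch(prompt)[0]

# Same slice as new line 333: drop the echoed prompt, keep only the new tokens.
model_response = new[len(original):].strip()
print(model_response)  # -> kubectl get pods --all-namespaces
```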
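One quirk in the last hunk: the first added log line (new line 349) is labeled "finetuned model" but interpolates `res_google_search_prompt`, which appears to be assigned only at line 352, so as committed it would likely raise a `NameError`; presumably `res_prompt` was meant. A sketch of the presumed intent, with stand-ins for the app.py helpers so it runs on its own:

```python
# Stand-ins for the app.py helpers; both names' behavior here is assumed.
def create_generation_prompt(response_num, prompt, search):
    return f"[{search or 'plain'}] {prompt}"

def generate_batch(*prompts):
    return [(p, p + " <generated>") for p in prompts]

response_num, prompt = 1, "What is a ReplicaSet?"

# Presumed intent: each log line prints the prompt assigned just above it.
res_prompt = create_generation_prompt(response_num, prompt, False)
print(f'Prompt given to finetuned model:\n{res_prompt}\n')
res_semantic_search_prompt = create_generation_prompt(response_num, prompt, "semantic_search")
print(f'Prompt given to model with RAG:\n{res_semantic_search_prompt}\n')
res_google_search_prompt = create_generation_prompt(response_num, prompt, "google_search")
print(f'Prompt given to model with Google search:\n{res_google_search_prompt}\n')

# All three prompt variants go to the model in one batched call.
gen_normal, gen_semantic_search, gen_google_search = generate_batch(
    res_prompt, res_semantic_search_prompt, res_google_search_prompt
)
```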