Danielrahmai1991 committed on
Commit
dc1fe45
·
verified ·
1 Parent(s): 13afdfa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -47,14 +47,14 @@ def postprocerssing(inp_text: str):
47
 
48
 
49
  def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
50
-
51
  lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
52
  if lang == 'en':
53
  prompt = error_correct_pyspeller(prompt)
54
  en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
55
  messages.append({"role": "user", "content": en_translated})
56
  # messages.append({"role": "user", "content": prompt})
57
- print("messages")
58
  input_ids = tokenizer.apply_chat_template(
59
  messages,
60
  add_generation_prompt = True,
@@ -67,7 +67,6 @@ def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
67
  generate_kwargs = dict(
68
  max_length=int(max_length),top_p=float(top_p), do_sample=True,
69
  top_k=int(top_k), streamer=streamer, temperature=int(temprature), repetition_penalty=1.2
70
-
71
  )
72
 
73
  t = Thread(target=model.generate, args=(input_ids,), kwargs=generate_kwargs)
 
47
 
48
 
49
  def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
50
+ print("prompt, max_length, top_p, temprature, top_k, messages", prompt, max_length, top_p, temprature, top_k, messages)
51
  lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
52
  if lang == 'en':
53
  prompt = error_correct_pyspeller(prompt)
54
  en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
55
  messages.append({"role": "user", "content": en_translated})
56
  # messages.append({"role": "user", "content": prompt})
57
+ print("messages", messages)
58
  input_ids = tokenizer.apply_chat_template(
59
  messages,
60
  add_generation_prompt = True,
 
67
  generate_kwargs = dict(
68
  max_length=int(max_length),top_p=float(top_p), do_sample=True,
69
  top_k=int(top_k), streamer=streamer, temperature=int(temprature), repetition_penalty=1.2
 
70
  )
71
 
72
  t = Thread(target=model.generate, args=(input_ids,), kwargs=generate_kwargs)