Harshithacj123 committed (verified)
Commit 1779d10 · Parent(s): 6b2c098

Update app.py

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -23,15 +23,14 @@ You are a helpful bot. Your answers are clear and concise.
 
 """
 
-# Formatting function for message and history
-def format_message(message: str) -> str:
+def format_message(message: str, history: list, memory_limit: int = 3) -> str:
 
     formatted_message = f"<s>[INST] {message} [/INST]"
 
     return formatted_message
 
 # Generate a response from the Llama model
-def get_llama_response(message: str) -> str:
+def get_llama_response(message: str, history: list) -> str:
     """
     Generates a conversational response from the Llama model.
     Parameters:
@@ -40,7 +39,7 @@ def get_llama_response(message: str) -> str:
     Returns:
         str: Generated response from the Llama model.
     """
-    query = format_message(message)
+    query = format_message(message, history)
     response = ""
 
     sequences = llama_pipeline(
@@ -49,14 +48,15 @@ def get_llama_response(message: str) -> str:
         top_k=10,
         num_return_sequences=1,
         eos_token_id=tokenizer.eos_token_id,
-        max_length=256,
+        max_length=512,
     )
 
     generated_text = sequences[0]['generated_text']
     response = generated_text[len(query):]  # Remove the prompt from the output
-
+
     print("Chatbot:", response.strip())
     return response.strip()
 
-get_llama_response("What are the key operating conditions of the reverse water gas shift (RWGS) reaction in an industrial catalytic reactor for the production of syngas from carbon dioxide (CO2) and hydrogen (H2)?")
-#gr.ChatInterface(get_llama_response).launch()
+
+gr.ChatInterface(get_llama_response).launch()
+
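The truncated diff view shows only the new signature of format_message, not its new body, which now threads the chat history into the Llama 2 prompt. Below is a minimal sketch of what such a history-aware formatter typically looks like, assuming the standard Llama 2 [INST]/<<SYS>> chat template and the [user_message, bot_response] pair list that gr.ChatInterface passes as history; the SYSTEM_PROMPT constant and the exact truncation policy are assumptions, not taken from this commit.

SYSTEM_PROMPT = """<s>[INST] <<SYS>>
You are a helpful bot. Your answers are clear and concise.
<</SYS>>
"""

def format_message(message: str, history: list, memory_limit: int = 3) -> str:
    """Format `message` plus recent `history` into a Llama 2 chat prompt."""
    # Keep only the last `memory_limit` exchanges to bound the prompt length.
    if len(history) > memory_limit:
        history = history[-memory_limit:]

    # No prior turns: system prompt plus the new user message.
    if len(history) == 0:
        return SYSTEM_PROMPT + f"{message} [/INST]"

    # The first exchange carries the system prompt; later ones are plain [INST] blocks.
    formatted_message = SYSTEM_PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"
    for user_msg, bot_msg in history[1:]:
        formatted_message += f"<s>[INST] {user_msg} [/INST] {bot_msg} </s>"

    # Append the new user message, left open for the model to complete.
    formatted_message += f"<s>[INST] {message} [/INST]"
    return formatted_message

The max_length bump from 256 to 512 in the same commit fits this change: transformers pipelines count the prompt toward max_length, so a prompt that now carries up to three prior exchanges needs the extra headroom.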