mtyrrell committed · Commit c11ca23 · verified · 1 Parent(s): c3df2b8

Update appStore/rag.py

Files changed (1):
  1. appStore/rag.py (+26 −17)
appStore/rag.py CHANGED
@@ -32,40 +32,49 @@ def get_prompt(context, label):
 # def completion_with_backoff(**kwargs):
 #     return openai.ChatCompletion.create(**kwargs)
 
-# construct query, send to HF API and process response
+class ChatCompletionResult:
+    def __init__(self):
+        self.content = ""
+
+    def add_content(self, text):
+        self.content += text
+
+    def get_full_content(self):
+        return self.content.strip()
+
 def run_query(context, label, model_sel_name):
     '''
-    For non-streamed completion, enable the following 2 lines and comment out the code below
+    Summarize the provided text
     '''
     chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
     messages = [{"role": "system", "content": chatbot_role},{"role": "user", "content": get_prompt(context, label)}]
 
     # Initialize the client, pointing it to one of the available models
-    client = InferenceClient(model_sel_name, token = hf_token)
+    client = InferenceClient(model_sel_name, token=hf_token)
 
-    # instantiate ChatCompletion as a generator object (stream is set to True)
-    chat_completion = client.chat_completion(
+    # Instantiate ChatCompletion as a generator object (stream is set to True)
+    chat_completion = client.chat.completions.create(
         messages=messages,
         stream=True
     )
-    # chat_completion = completion_with_backoff(messages=messages, stream=True)
 
-    # iterate through the streamed output
-    report = []
+    # Create an object to store the full chat completion
+    completion_result = ChatCompletionResult()
     res_box = st.empty()
+
+    # Iterate through the streamed output
     for chunk in chat_completion:
-        # extract the object containing the text (totally different structure when streaming)
-        if chunk.choices is not None: # sometimes returns None - probably the prompt needs work
+        # Extract the object containing the text
+        if chunk.choices is not None:
             chunk_message = chunk.choices[0].delta
-            # test to make sure there is text in the object (some don't have)
             if 'content' in chunk_message:
-                report.append(chunk_message['content']) # extract the message
-                # add the latest text and merge it with all previous
-                result = "".join(report).strip()
-                # res_box.success(result) # output to response text box
-                res_box.success(result)
+                completion_result.add_content(chunk_message['content']) # Store the message
+                # Add the latest text and merge it with all previous
+                result = completion_result.get_full_content()
+                res_box.success(result) # Output to response text box
 
+    # Return the stored chat completion object for later use
+    return completion_result
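The net effect of the commit: the streamed completion is no longer rendered and discarded, but accumulated in a ChatCompletionResult and returned to the caller. Below is a minimal sketch of how a caller in the Streamlit app might use that return value; the button label, the placeholder context/label values, and the model id are illustrative assumptions, not part of this commit.

# Hypothetical caller (names and values are illustrative, not from this commit)
import streamlit as st

from appStore.rag import run_query

context = "...retrieved policy document excerpt..."      # placeholder input text
label = "climate impact"                                 # placeholder category
model_sel_name = "meta-llama/Meta-Llama-3-8B-Instruct"   # any hosted chat model id

if st.button("Generate insights"):
    # run_query() streams tokens into its own st.empty() box as they arrive,
    # then returns the accumulated ChatCompletionResult
    completion_result = run_query(context, label, model_sel_name)

    # Because the full text now survives the streaming loop, it can be reused
    # after rendering, e.g. offered as a download
    st.download_button(
        "Download summary",
        data=completion_result.get_full_content(),
        file_name="summary.txt",
    )

Compared with the previous local report list, storing the chunks on an object is what makes the final return completion_result possible; the join-and-strip logic moves into get_full_content(), so callers never reassemble chunks themselves.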