KevinHuSh committed on
Commit 71b2e07 · 1 Parent(s): bf46bd5

add locally deployed llm (#841)


### What problem does this PR solve?
Adds a `chat_streamly` method to `LocalLLM` so that locally deployed models can stream chat responses, matching the streaming interface of the other chat model wrappers.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

Files changed (1)
  1. rag/llm/chat_model.py +16 -1
rag/llm/chat_model.py CHANGED
@@ -298,4 +298,19 @@ class LocalLLM(Base):
             )
             return ans, num_tokens_from_string(ans)
         except Exception as e:
-            return "**ERROR**: " + str(e), 0
+            return "**ERROR**: " + str(e), 0
+
+    def chat_streamly(self, system, history, gen_conf):
+        if system:
+            history.insert(0, {"role": "system", "content": system})
+        token_count = 0
+        answer = ""
+        try:
+            for ans in self.client.chat_streamly(history, gen_conf):
+                answer += ans
+                token_count += 1
+                yield answer
+        except Exception as e:
+            yield answer + "\n**ERROR**: " + str(e)
+
+        yield token_count
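
For reference, the new `chat_streamly` is a generator: while the local model streams, it yields the answer accumulated so far (or the partial answer plus an error message on failure), and its final item is the chunk count used as a rough token count. It also inserts the system prompt into the caller's `history` list in place, mirroring the existing `chat` method. Below is a minimal consumption sketch; the `mdl` instance and the `gen_conf` contents are placeholders, not part of this diff.

```python
# Assume `mdl` is a LocalLLM instance obtained from RAGFlow's chat-model
# factory; how it is constructed is outside the scope of this change.
history = [{"role": "user", "content": "Hello!"}]

last_answer, token_count = "", 0
for chunk in mdl.chat_streamly(
    system="You are a helpful assistant.",
    history=history,
    gen_conf={"temperature": 0.7},  # assumed keys; whatever self.client accepts
):
    if isinstance(chunk, int):
        # The final yielded item is the running chunk/token count.
        token_count = chunk
    else:
        # Each string is the full answer accumulated so far, not a delta.
        last_answer = chunk

print(last_answer)
print("approx. tokens:", token_count)
```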