suwesh committed on
Commit
0783741
·
verified ·
1 Parent(s): 7125111

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -13
app.py CHANGED
@@ -1,19 +1,19 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
  from transformers import pipeline
4
 
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
8
  #client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
- client = InferenceClient("distilbert/distilgpt2")
10
  modelpath = "distilgpt2"
11
 
12
  pipe = pipeline(
13
  "text-generation",
14
  model=modelpath
15
  )
16
- messages = [
17
  {"role": "system", "content": "You are a customer applying for a housing loan in India. Provide dummy details about your application and negotiate the terms."},
18
  {"role": "user", "content": "Hi!Welcome to Hero Housing Finance!"},
19
  {"role": "assistant", "content": "Hello, I would like to apply for a loan."},
@@ -34,6 +34,10 @@ def respond(
34
  ):
35
  messages = [{"role": "system", "content": system_message}]
36
  try:
 
 
 
 
37
  for val in history:
38
  if val[0]:
39
  messages.append({"role": "user", "content": val[0]})
@@ -44,19 +48,15 @@ def respond(
44
  except Exception as e:
45
  return f"Error: {str(e)}"
46
 
47
- response = ""
48
 
49
- for message in client.chat_completion(
50
- messages,
51
- max_tokens=max_tokens,
52
- stream=True,
53
  temperature=temperature,
54
  top_p=top_p,
55
- ):
56
- token = message.choices[0].delta.content
57
-
58
- response += token
59
- yield response
60
 
61
 
62
  """
 
1
  import gradio as gr
2
+ #from huggingface_hub import InferenceClient
3
  from transformers import pipeline
4
 
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
8
  #client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
+
10
  modelpath = "distilgpt2"
11
 
12
  pipe = pipeline(
13
  "text-generation",
14
  model=modelpath
15
  )
16
+ initial_messages = [
17
  {"role": "system", "content": "You are a customer applying for a housing loan in India. Provide dummy details about your application and negotiate the terms."},
18
  {"role": "user", "content": "Hi!Welcome to Hero Housing Finance!"},
19
  {"role": "assistant", "content": "Hello, I would like to apply for a loan."},
 
34
  ):
35
  messages = [{"role": "system", "content": system_message}]
36
  try:
37
+ #add initil message to the conversation history
38
+ for msg in initial_messages:
39
+ messages.append(msg)
40
+
41
  for val in history:
42
  if val[0]:
43
  messages.append({"role": "user", "content": val[0]})
 
48
  except Exception as e:
49
  return f"Error: {str(e)}"
50
 
51
+ combined_messages = " ".join([msg["content"] for msg in messages])
52
 
53
+ response = pipe(
54
+ combined_messages,
55
+ max_new_tokens=max_tokens,
 
56
  temperature=temperature,
57
  top_p=top_p,
58
+ )[0]["generated_text"]
59
+ yield response
 
 
 
60
 
61
 
62
  """