pro-grammer committed on
Commit
e2a7184
·
verified ·
1 Parent(s): 7203a8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -17
app.py CHANGED
@@ -1,37 +1,34 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Initialize the InferenceClient with your custom model hosted on Hugging Face.
5
  client = InferenceClient(model="pro-grammer/MindfulAI")
6
 
7
  def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
8
- # Build conversation context
9
- messages = [{"role": "system", "content": system_message}]
10
  for user_msg, assistant_msg in history:
11
- if user_msg:
12
- messages.append({"role": "user", "content": user_msg})
13
- if assistant_msg:
14
- messages.append({"role": "assistant", "content": assistant_msg})
15
- messages.append({"role": "user", "content": message})
16
-
17
  response = ""
18
- # Use the chat_completion method to stream the model's response
19
- for message in client.chat_completion(
20
- messages,
21
- max_tokens=max_tokens,
22
  stream=True,
23
  temperature=temperature,
24
  top_p=top_p,
25
  ):
26
- token = message.choices[0].delta.content
27
- response += token
 
28
  yield response
29
 
30
- # Customize the ChatInterface with additional input controls
31
  demo = gr.ChatInterface(
32
  fn=respond,
33
  title="MindfulAI Chat",
34
- description="Chat with MindfulAI – your hosted AI Therapist.",
35
  additional_inputs=[
36
  gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
37
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -42,3 +39,4 @@ demo = gr.ChatInterface(
42
 
43
  if __name__ == "__main__":
44
  demo.launch()
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
# Inference client for the model hosted on the Hugging Face Hub.
# NOTE(review): no token is passed here, so the model repo is presumably
# public — confirm before deploying.
client = InferenceClient(model="pro-grammer/MindfulAI")
6
 
7
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    """Stream a model reply for a Gradio ChatInterface.

    Args:
        message: The latest user message.
        history: Prior conversation as (user, assistant) pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The accumulated response text after each streamed token, so the
        UI can render the reply incrementally.
    """
    # Build a plain-text prompt manually (the endpoint is driven via
    # text_generation rather than chat_completion).
    prompt = system_message + "\n"
    for user_msg, assistant_msg in history:
        prompt += f"Human: {user_msg}\nAssistant: {assistant_msg}\n"
    prompt += f"Human: {message}\nAssistant:"

    response = ""
    for token in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # BUG FIX: with stream=True and details left False, text_generation
        # yields plain str tokens, not dicts — the previous
        # token.get("generated_text", "") raised AttributeError on the first
        # token. Append the token text directly.
        response += token
        yield response
27
 
 
28
  demo = gr.ChatInterface(
29
  fn=respond,
30
  title="MindfulAI Chat",
31
+ description="Chat with MindfulAI – your AI Therapist powered by your model.",
32
  additional_inputs=[
33
  gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
34
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
 
39
 
40
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
42
+