fahadMizan committed
Commit 0fe5401 · verified · 1 Parent(s): fecb470

Update app.py

Files changed (1): app.py (+25 -22)
app.py CHANGED
@@ -1,23 +1,26 @@
- # Define the messages properly before using them
- messages = [
-     {"role": "system", "content": "You are a helpful assistant."},
-     {"role": "user", "content": "Hello!"},
- ]

- # Stream the response from the model
- def stream_response():
-     response = ""
-     # Make sure to define 'messages' before using it
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-         response += token
-         yield response
-
- for resp in stream_response():
-     print(resp)
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch

+ # Load the model and tokenizer from Hugging Face
+ model_name = "fahadMizan/chatbot"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Define the chatbot function
+ def chatbot(input_text):
+     inputs = tokenizer(input_text, return_tensors="pt")
+     outputs = model.generate(inputs["input_ids"], max_length=100)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ # Create the Gradio interface
+ iface = gr.Interface(fn=chatbot,
+                      inputs="text",
+                      outputs="text",
+                      title="Fahad Chatbot",
+                      description="A chatbot using the 'fahadMizan/chatbot' model from Hugging Face.")
+
+ # Launch the app
+ if __name__ == "__main__":
+     iface.launch()
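
A quick way to sanity-check the new app.py is to import its chatbot() function directly and call it once, before launching the Gradio UI. The sketch below is an assumption about how the file would be exercised locally (it is not part of the commit): it presumes app.py is on the import path and that the "fahadMizan/chatbot" weights can be downloaded from the Hub. The model loads at import time, and iface.launch() is skipped because __name__ is not "__main__" when imported.

# Minimal local smoke test for the updated app.py (hypothetical usage, not in the commit).
# Assumes app.py sits next to this script and the model repo is reachable.
from app import chatbot  # model and tokenizer load on import; the Gradio app is not launched

# Single-turn query: chatbot() tokenizes the prompt, generates up to 100 tokens,
# and returns the decoded text.
print(chatbot("Hello! Who are you?"))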