ArrcttacsrjksX committed on
Commit 3180dde · verified · 1 Parent(s): 45911c6

Update app.py

Files changed (1)
  1. app.py +25 -15
app.py CHANGED
@@ -1,22 +1,32 @@
  import gradio as gr
- from transformers import pipeline
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer

- # Load GPT-2 chat model
- chatbot = pipeline("text-generation", model="gpt2")
+ # Load pre-trained GPT-2 model and tokenizer
+ model_name = "gpt2"
+ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+ model = GPT2LMHeadModel.from_pretrained(model_name)

- # Function to handle the chatbot interaction
- def chat_with_gpt2(messages):
-     prompt = messages[-1][1]  # Get the latest message from the user
-     response = chatbot(prompt, max_length=100, num_return_sequences=1)
-     return messages + [(response[0]['generated_text'], "GPT-2")]
+ def generate_response(message, history):
+     # Combine the conversation history with the new message
+     input_text = f"{message}"
+
+     # Tokenize input text
+     inputs = tokenizer.encode(input_text, return_tensors="pt")
+
+     # Generate response using GPT-2
+     outputs = model.generate(inputs, max_length=50, num_return_sequences=1)
+
+     # Decode generated text
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     return response

- # Create a Gradio Chatbot interface
- iface = gr.Interface(
-     fn=chat_with_gpt2,
-     inputs=gr.Chatbot(),
-     outputs=gr.Chatbot(),
+ # Create ChatInterface
+ demo = gr.ChatInterface(
+     fn=generate_response,
      title="Chat with GPT-2",
-     description="A simple chat app using GPT-2"
+     description="A simple chatbot powered by GPT-2."
  )

- iface.launch()
+ # Launch the app
+ demo.launch()
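
Note on the new generate_response: despite the comment about combining the conversation history with the new message, the committed function only uses message, so each turn is answered without any prior context, and max_length=50 also counts the prompt tokens. Below is a minimal sketch of how the history could be folded into the prompt; it is not part of this commit, it assumes Gradio passes history in the default tuple format (a list of (user, bot) pairs) and uses the same gpt2 tokenizer/model as app.py, and the sampling settings are illustrative only.

# Sketch only (not part of this commit): fold prior turns into the prompt.
# Assumes tuple-format history and the gpt2 tokenizer/model as in app.py.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

def generate_response(message, history):
    # Flatten earlier (user, bot) pairs into plain text, then append the new message
    turns = []
    for user_msg, bot_msg in history:
        turns.append(f"User: {user_msg}")
        turns.append(f"Bot: {bot_msg}")
    turns.append(f"User: {message}")
    input_text = "\n".join(turns) + "\nBot:"

    # Tokenize the assembled prompt
    inputs = tokenizer.encode(input_text, return_tensors="pt")

    # max_new_tokens bounds only the reply; GPT-2 has no pad token, so reuse EOS
    outputs = model.generate(
        inputs,
        max_new_tokens=50,
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens, not the echoed prompt
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

The key design choice is slicing off the prompt tokens before decoding; otherwise GPT-2 returns the whole conversation text back to the chat window, which is what the committed version will do for longer prompts.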