John Smith committed on
Commit 4dbb3f0 · verified · 1 Parent(s): 5b39830

Update app.py

Files changed (1)
  1. app.py +33 -19
app.py CHANGED
@@ -2,35 +2,49 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-# Load model and tokenizer
+# Load the model and tokenizer
 model_name = "cognitivecomputations/TinyDolphin-2.8-1.1b"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
-def generate_response(message, history):
-    # Format the input with chat history
-    prompt = "".join([f"Human: {h[0]}\nAssistant: {h[1]}\n" for h in history])
-    prompt += f"Human: {message}\nAssistant:"
+# Move model to GPU if available
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
 
+def generate_response(message, chat_history):
+    # Prepare the input
+    chat_history_text = ""
+    for turn in chat_history:
+        chat_history_text += f"Human: {turn[0]}\nAI: {turn[1]}\n"
+
+    prompt = f"{chat_history_text}Human: {message}\nAI:"
+
     # Tokenize and generate
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_new_tokens=1000, temperature=0.7, do_sample=True)
+    inputs = tokenizer(prompt, return_tensors="pt").to(device)
+    outputs = model.generate(
+        **inputs,
+        max_new_tokens=100,
+        temperature=0.7,
+        top_p=0.9,
+        do_sample=True
+    )
+
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Extract only the assistant's response
-    assistant_response = response.split("Assistant:")[-1].strip()
-    return assistant_response
+
+    # Extract only the AI's response
+    ai_response = response.split("AI:")[-1].strip()
+
+    return ai_response
 
 # Create the Gradio interface
 iface = gr.ChatInterface(
     generate_response,
-    title="TinyDolphin-2.8-1.1b Chat Interface",
-    description="Chat with the TinyDolphin-2.8-1.1b model. Type your message and press Enter.",
-    examples=[
-        "What is the capital of France?",
-        "Explain quantum computing in simple terms.",
-        "Write a short poem about artificial intelligence."
-    ],
+    chatbot=gr.Chatbot(height=300),
+    textbox=gr.Textbox(placeholder="Type your message here...", container=False, scale=7),
+    title="TinyDolphin-2.8-1.1b Chatbot",
+    description="Chat with the TinyDolphin-2.8-1.1b model.",
+    theme="soft",
+    examples=["Tell me a short story", "What's the capital of France?", "Explain quantum computing"],
     cache_examples=False,
 )
 
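
Note: for reference, a minimal standalone sketch of the prompt format the rewritten generate_response builds from the chat history. The build_prompt helper and the example history below are illustrative assumptions added here, not part of this commit; they only mirror the string construction in the diff and run without loading the model.

# Sketch (assumption, not committed code): reproduce the prompt layout used above.
def build_prompt(message, chat_history):
    # Same construction as the new generate_response: each past turn as
    # "Human: .../AI: ...", then the current message with a trailing "AI:".
    chat_history_text = ""
    for turn in chat_history:
        chat_history_text += f"Human: {turn[0]}\nAI: {turn[1]}\n"
    return f"{chat_history_text}Human: {message}\nAI:"

if __name__ == "__main__":
    history = [("Hi", "Hello! How can I help?")]
    print(build_prompt("What's the capital of France?", history))
    # Expected output:
    # Human: Hi
    # AI: Hello! How can I help?
    # Human: What's the capital of France?
    # AI: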