MatteoScript committed
Commit f83c8d7 · 1 Parent(s): 077236d

Update app.py

Files changed (1)
  1. app.py +46 -25
app.py CHANGED
@@ -3,28 +3,49 @@ import gradio as gr
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
-def generate(prompt, temperature=0.2, max_new_tokens=30000, top_p=0.9, repetition_penalty=1.0):
-    temperature = max(float(temperature), 0.01)
-    top_p = max(min(float(top_p), 1.0), 0.0)
-    repetition_penalty = max(float(repetition_penalty), 0.01)
-
-    generate_kwargs = {
-        "temperature": temperature,
-        "max_new_tokens": max_new_tokens,
-        "top_p": top_p,
-        "repetition_penalty": repetition_penalty,
-        "do_sample": True,
-        "seed": 42,
-    }
-
-    response = client.text_generation(prompt, **generate_kwargs)
-    generated_text = response["generated_text"]
-    return generated_text
-
-iface = gr.Interface(
-    fn=generate,
-    inputs=["text", gr.inputs.Slider(0.1, 2.0), gr.inputs.Slider(100, 50000), gr.inputs.Slider(0.1, 1.0)],
-    outputs="text",
-    title="Text Generation"
-)
-iface.launch()
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def generate(
+    prompt, history, temperature=0.2, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0,
+):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history)
+
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
+
+mychatbot = gr.Chatbot(
+    avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
+
+demo = gr.ChatInterface(fn=generate,
+    chatbot=mychatbot,
+    title="Matteo's Mixtral 8x7b Chat",
+    retry_btn=None,
+    undo_btn=None
+)
+
+demo.queue().launch(show_api=True)
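
For context on the change: the new format_prompt helper serializes the chat history into Mixtral's instruction format before each request, and generate now streams tokens instead of returning one blob. A minimal standalone sketch of what format_prompt produces (the conversation values below are made up for illustration):

    def format_prompt(message, history):
        # Each user turn is wrapped in [INST] ... [/INST]; each bot reply
        # follows its turn and is closed with </s> (Mixtral instruct format).
        prompt = "<s>"
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
        prompt += f"[INST] {message} [/INST]"
        return prompt

    # Hypothetical exchange, purely illustrative:
    history = [("Ciao!", "Hello! How can I help you today?")]
    print(format_prompt("Summarize our chat.", history))
    # <s>[INST] Ciao! [/INST] Hello! How can I help you today?</s> [INST] Summarize our chat. [/INST]

Because generate yields the accumulated output string on every streamed token, gr.ChatInterface can re-render the partial reply as it arrives rather than waiting for the full completion.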