joermd commited on
Commit
edbe728
·
verified ·
1 Parent(s): 6458507

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -16
app.py CHANGED
@@ -1,9 +1,23 @@
1
- # app.py
2
  import gradio as gr
3
- from huggingface_hub import InferenceClient
 
 
4
 
5
- client = InferenceClient("joermd/speedy-llama2")
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  def respond(
8
  message,
9
  history: list[tuple[str, str]],
@@ -12,29 +26,31 @@ def respond(
12
  temperature,
13
  top_p,
14
  ):
15
- messages = [{"role": "system", "content": system_message}]
 
16
  for val in history:
17
  if val[0]:
18
  messages.append({"role": "user", "content": val[0]})
19
  if val[1]:
20
  messages.append({"role": "assistant", "content": val[1]})
 
21
  messages.append({"role": "user", "content": message})
22
- response = ""
23
- for message in client.chat_completion(
24
  messages,
25
- max_tokens=max_tokens,
26
- stream=True,
27
- temperature=temperature,
28
- top_p=top_p,
29
- ):
30
- token = message.choices[0].delta.content
31
- response += token
32
- yield response
33
 
 
 
 
34
  demo = gr.ChatInterface(
35
  respond,
36
  additional_inputs=[
37
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
38
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
39
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
40
  gr.Slider(
@@ -47,5 +63,6 @@ demo = gr.ChatInterface(
47
  ],
48
  )
49
 
 
50
  if __name__ == "__main__":
51
- demo.launch()
 
 
1
  import gradio as gr
2
+ import spaces
3
+ import transformers
4
+ import torch
5
 
6
# Hugging Face Hub repo id of the model this Space serves.
model_id = "joermd/speedy-llama2"

# Build the text-generation pipeline once at import time; bfloat16 weights and
# device_map="auto" let the model be placed on whatever device is available.
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Token ids at which generation stops: the tokenizer's EOS token plus the
# "<|eot_id|>" end-of-turn marker.
# NOTE(review): "<|eot_id|>" is a Llama-3-style special token — confirm it
# actually exists in this model's vocabulary (convert_tokens_to_ids returns
# the unk id for unknown tokens rather than raising).
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
19
+
20
@spaces.GPU
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Generate one assistant reply with the module-level text-generation pipeline.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turn pairs from gr.ChatInterface.
        system_message: System prompt from the UI Textbox.
        max_tokens: Generation budget from the "Max new tokens" slider.
        temperature: Sampling temperature from the UI slider.
        top_p: Nucleus-sampling cutoff from the UI slider.

    Yields:
        The generated reply text (yielded once; no token streaming).
    """
    messages = []
    # Fix: the system prompt from the UI was previously ignored entirely.
    if system_message:
        messages.append({"role": "system", "content": system_message})

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    outputs = pipeline(
        messages,
        max_new_tokens=max_tokens,   # fix: was hard-coded to 256, ignoring the slider
        eos_token_id=terminators,
        do_sample=True,              # required for temperature/top_p to take effect
        temperature=temperature,     # fix: accepted but never forwarded before
        top_p=top_p,                 # fix: accepted but never forwarded before
    )

    # The pipeline returns the whole conversation in "generated_text";
    # the last entry is the newly generated assistant message.
    yield outputs[0]["generated_text"][-1]["content"]
 
 
 
46
 
47
+ """
48
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
49
+ """
50
  demo = gr.ChatInterface(
51
  respond,
52
  additional_inputs=[
53
+ gr.Textbox(value="Kamu adalah seorang asisten yang baik", label="System message"),
54
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
55
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
56
  gr.Slider(
 
63
  ],
64
  )
65
 
66
+
67
  if __name__ == "__main__":
68
+ demo.launch()