elghafiani committed on
Commit
518218a
·
1 Parent(s): ae1b4d9
Files changed (1) hide show
  1. app.py +76 -1
app.py CHANGED
@@ -1,3 +1,78 @@
 
 
 
 
 
 
1
  import gradio as gr
2
 
3
- gr.load("models/ise-uiuc/Magicoder-CL-7B").launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer
2
+ import transformers
3
+ import os
4
+ import sys
5
+ import fire
6
+ import torch
7
  import gradio as gr
8
 
9
+
10
def main(
    base_model="ise-uiuc/Magicoder-S-DS-6.7B",
    device="cuda:0",
    port=8080,
):
    """Launch a Gradio playground around a Magicoder text-generation pipeline.

    Args:
        base_model: Hugging Face model id used for both tokenizer and weights.
        device: torch device string the pipeline runs on (e.g. "cuda:0").
        port: server port handed to Gradio's ``launch()``.
    """
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    # Pass the tokenizer we already loaded to the pipeline; the original left
    # `tokenizer` unused, so the pipeline silently loaded a second copy.
    pipeline = transformers.pipeline(
        "text-generation",
        model=base_model,
        tokenizer=tokenizer,
        torch_dtype=torch.float16,
        device=device,
    )

    def evaluate_magicoder(
        instruction,
        temperature=1,
        max_new_tokens=2048,
    ):
        """Wrap `instruction` in the Magicoder prompt, generate, and return the completion."""
        MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.

@@ Instruction
{instruction}

@@ Response
"""
        prompt = MAGICODER_PROMPT.format(instruction=instruction)

        # Sampling is only meaningful for temperature > 0; at 0 fall back to
        # the pipeline's default greedy decoding (do_sample=False).
        if temperature > 0:
            sequences = pipeline(
                prompt,
                do_sample=True,
                temperature=temperature,
                max_new_tokens=max_new_tokens,
            )
        else:
            sequences = pipeline(
                prompt,
                max_new_tokens=max_new_tokens,
            )
        for seq in sequences:
            print('==========================question=============================')
            print(prompt)
            # Strip the echoed prompt so only the model's answer is returned.
            generated_text = seq['generated_text'].replace(prompt, "")
            print('===========================answer=============================')
            print(generated_text)
            # Only the first generated sequence is surfaced in the UI.
            return generated_text

    gr.Interface(
        fn=evaluate_magicoder,
        inputs=[
            gr.components.Textbox(
                lines=3, label="Instruction", placeholder="Anything you want to ask Magicoder ?"
            ),
            gr.components.Slider(minimum=0, maximum=1, value=1, label="Temperature"),
            gr.components.Slider(
                minimum=1, maximum=2048, step=1, value=512, label="Max tokens"
            ),
        ],
        outputs=[
            gr.components.Textbox(
                lines=30,
                label="Output",
            )
        ],
        title="Magicoder",
        description="This is a LLM playground for Magicoder! Follow us on Github: https://github.com/ise-uiuc/magicoder and Huggingface: https://huggingface.co/ise-uiuc."
    ).queue().launch(share=True, server_port=port)
76
+
77
if __name__ == "__main__":
    # Expose main()'s keyword arguments (base_model, device, port) as CLI
    # flags via python-fire, e.g. `python app.py --port=7860`.
    fire.Fire(main)