mudogruer committed
Commit e70ad94 · verified · 1 Parent(s): c7a638b

Upload app.py

Files changed (1): app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ # -*- coding: utf-8 -*-
+ """gradio.ipynb
+
+ Automatically generated by Colab.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1goHcmXF0Gc4_X9PN-zecV77j9KeI6Dmn
+ """
+
+ # !pip install -q -U gradio
+ # !pip install -q -U torch transformers accelerate einops
+ # !pip install -q peft
+
+ import gradio as gr
+
+ import torch
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForCausalLM,
+     TextIteratorStreamer,
+     pipeline,
+ )
+
+ # Hugging Face model id of the SciQ fine-tuned Mixtral adapter
+ # Download and load the tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("mudogruer/mixtral-7x8b-SciQ", trust_remote_code=True)
+
+ # Download the safetensors adapter of the fine-tuned Mixtral model
+ from peft import PeftModel, PeftConfig
+
+ # Load the base model, then attach the fine-tuned adapter weights
+ config = PeftConfig.from_pretrained("mudogruer/mixtral-7x8b-SciQ")
+ base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
+ model = PeftModel.from_pretrained(base_model, "mudogruer/mixtral-7x8b-SciQ")
+
+ # Text generation pipeline
+ generator = pipeline(
+     "text-generation",
+     tokenizer=tokenizer,
+     model=model,
+     pad_token_id=tokenizer.eos_token_id,
+     eos_token_id=tokenizer.eos_token_id,
+     device_map="cpu",
+ )
+
+ # Example questions, each paired with the slider's default token budget
+ examples = [
+     ["Which organelle carries out the synthesis and packaging of digestive enzymes?", 21],
+     ["What is the change in speed of a moving object per unit time?", 21],
+     ["What is the formula of carbon tetrafluoride?", 21],
+ ]
+
+ def generate(message, max_new_tokens):
+     instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
+     final_prompt = f"Instruction: {instruction}\nUser: {message}\nOutput:"
+
+     # Generate text synchronously
+     response = generator(final_prompt, max_new_tokens=max_new_tokens)
+     generated_text = response[0]["generated_text"]
+
+     # Extract the assistant's reply: everything after the last 'Output:' marker
+     last_response = generated_text.split("Output:")[-1].strip()
+     return last_response
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("""### Mixtral-7x8b Scientific Question Chatbot (fine-tuned on the SciQ dataset)""")
+     tokens_slider = gr.Slider(8, 128, value=21, label="Maximum new tokens")
+     chatbot = gr.Interface(fn=generate, inputs=["text", tokens_slider], outputs="text", examples=examples)
+ demo.launch(share=True)
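
Usage note (not part of the committed file): once the objects defined above are loaded, the `generate` helper can be exercised directly, without launching the Gradio UI. A minimal sketch, assuming the dependencies from the commented pip installs are available; the optional `merge_and_unload()` call is a standard `peft.PeftModel` method that folds the adapter into the base weights, which can speed up CPU inference at the cost of holding the merged weights in memory.

# Minimal sketch: query the fine-tuned model directly.
# Optional: merge the LoRA adapter into the base model for faster inference.
# model = model.merge_and_unload()

answer = generate("What is the formula of carbon tetrafluoride?", max_new_tokens=64)
print(answer)  # expected: a short answer such as "CF4"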