VidyaPeddinti committed
Commit 4f6884b · verified · 1 Parent(s): 9ec1a52

Upload 3 files

Files changed (3)
  1. app.py +19 -0
  2. mistral_space.yaml +3 -0
  3. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,19 @@
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+
+
+ model_name = "mistralai/Mistral-7B-v0.3"
+
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ def gen_text(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(inputs.input_ids, max_length=50)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Set up the Gradio interface
+ iface = gr.Interface(fn=gen_text, inputs="text", outputs="text", title="Mistral-7B Text Generator")
+
+ iface.launch()
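Note (not part of the commit): mistral_space.yaml below requests GPU hardware, but from_pretrained as written loads the 7B model in full precision on the CPU. The following is a minimal sketch of a GPU-aware variant, assuming torch and accelerate are installed in the Space (neither is listed in the committed requirements.txt) and using max_new_tokens so the limit applies only to generated tokens:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-v0.3"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision so the 7B weights fit in GPU memory
    device_map="auto",          # requires accelerate; places weights on the GPU when one is present
)

def gen_text(prompt):
    # Move the tokenized inputs to the same device the model weights landed on.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=50)  # **inputs also forwards the attention_mask
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Passing **inputs forwards the attention_mask alongside input_ids, which generate may otherwise warn about when only input_ids is supplied.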
mistral_space.yaml ADDED
@@ -0,0 +1,3 @@
+ sdk: gradio
+ hardware:
+   type: gpu
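Note (not part of the commit): a quick way to confirm the Space actually received the requested GPU is a startup check along these lines, sketched here assuming torch is importable in the runtime:

import torch

# Log which device is available so a missing GPU allocation is easy to spot in the Space logs.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Detected device: {device}")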
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers
+ gradio