Gbssreejith committed on
Commit
71a3199
·
verified ·
1 Parent(s): f2dd2f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -0
app.py CHANGED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import gradio as gr
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+
5
+ import os
6
+ import torch
7
+ from datasets import load_dataset
8
+ from transformers import (
9
+ AutoModelForCausalLM,
10
+ AutoTokenizer,
11
+ BitsAndBytesConfig,
12
+ HfArgumentParser,
13
+ TrainingArguments,
14
+ pipeline,
15
+ logging,
16
+ )
17
+ from peft import LoraConfig, PeftModel
18
+ # from trl import SFTTrainer
19
+
20
# Hugging Face repo that provides both the tokenizer and the weights.
_MODEL_ID = "Gbssreejith/new_TinyLlama3"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
22
+
23
def generate_response(prompt):
    """Generate a text completion for *prompt* and return the raw pipeline text.

    The prompt is wrapped in the ``<s>[INST] ... [/INST]`` chat template the
    model was presumably fine-tuned on — TODO confirm against the model card.
    """
    # Fix: the original rebuilt the generation pipeline on EVERY request,
    # re-wrapping the model and tokenizer each time. Cache it on the function
    # so it is constructed once and reused across Gradio calls.
    pipe = getattr(generate_response, "_pipe", None)
    if pipe is None:
        # max_length=50 counts prompt + completion tokens, so long prompts
        # leave little room for the reply — kept as-is to preserve behavior.
        pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=50)
        generate_response._pipe = pipe
    result = pipe(f"<s>[INST] {prompt} [/INST]")
    # NOTE(review): `generated_text` includes the prompt template itself;
    # strip it here if only the completion should be displayed — confirm UX.
    return result[0]['generated_text']
27
+
28
# Sample prompts shown beneath the input box (each inner list is one example
# row; the text is reproduced exactly as the app ships it).
_EXAMPLE_PROMPTS = [
    ["I'm having trouble sleeping. Any advice?"],
    ["I sad i dont know what to do"],
]

# Minimal text-in / text-out UI wired to the generator above.
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Text Generation",
    description="Enter a prompt and get a generated response.",
    examples=_EXAMPLE_PROMPTS,
)

# share=True exposes a public tunnel URL; debug=True blocks and streams logs.
iface.launch(share=True, debug=True)