aaliyaan committed on
Commit
8605573
·
1 Parent(s): 353af85

changed gui

Browse files
Files changed (1) hide show
  1. app.py +84 -86
app.py CHANGED
@@ -1,8 +1,8 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
3
  from PyPDF2 import PdfReader
4
 
5
- # Models and tokenizers setup
6
  models = {
7
  "Text Generator (Bloom)": {
8
  "model": AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m"),
@@ -10,98 +10,96 @@ models = {
10
  },
11
  "PDF Summarizer (T5)": {
12
  "model": AutoModelForSeq2SeqLM.from_pretrained("t5-small"),
13
- "tokenizer": AutoTokenizer.from_pretrained("t5-small", use_fast=False), # Use the slow tokenizer
14
  },
15
  "Broken Answer (T0pp)": {
16
  "model": AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp"),
17
- "tokenizer": AutoTokenizer.from_pretrained("bigscience/T0pp", use_fast=False), # Use the slow tokenizer
18
  },
19
  }
20
 
 
 
 
 
 
 
 
 
21
 
22
- # Function for text generation
23
- def generate_text(model_choice, input_text, max_tokens, temperature, top_p):
24
  model_info = models[model_choice]
25
  tokenizer = model_info["tokenizer"]
26
  model = model_info["model"]
27
 
28
- inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
29
- outputs = model.generate(
30
- **inputs, max_length=max_tokens, num_beams=5, early_stopping=True, temperature=temperature, top_p=top_p
31
- )
32
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
33
-
34
- # Function for PDF summarization
35
- def summarize_pdf(pdf_file, max_tokens, temperature, top_p):
36
- reader = PdfReader(pdf_file)
37
- text = ""
38
- for page in reader.pages:
39
- text += page.extract_text()
40
-
41
- model_info = models["PDF Summarizer (T5)"]
42
- tokenizer = model_info["tokenizer"]
43
- model = model_info["model"]
44
-
45
- inputs = tokenizer("summarize: " + text, return_tensors="pt", padding=True, truncation=True, max_length=512)
46
- outputs = model.generate(
47
- **inputs, max_length=max_tokens, num_beams=5, early_stopping=True, temperature=temperature, top_p=top_p
48
- )
49
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
50
-
51
- # Build Gradio interface
52
- def launch_custom_app():
53
- with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
54
- gr.Markdown("<h1 style='text-align: center;'>💡 Multi-Model Assistant</h1>")
55
- gr.Markdown("<p style='text-align: center;'>Switch between text generation, PDF summarization, or quirky broken answers!</p>")
56
-
57
- with gr.Tabs():
58
- # Tab for Text Generation
59
- with gr.Tab("Text Generator"):
60
- model_choice = gr.Dropdown(choices=list(models.keys()), label="Choose a Model", value="Text Generator (Bloom)")
61
- input_text = gr.Textbox(label="Enter Text")
62
- max_tokens = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
63
- temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
64
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
65
- output_text = gr.Textbox(label="Generated Text", interactive=False)
66
- generate_button = gr.Button("Generate Text")
67
-
68
- generate_button.click(
69
- generate_text,
70
- inputs=[model_choice, input_text, max_tokens, temperature, top_p],
71
- outputs=output_text
72
- )
73
-
74
- # Tab for PDF Summarization
75
- with gr.Tab("PDF Summarizer"):
76
- pdf_file = gr.File(label="Upload a PDF File", file_types=[".pdf"])
77
- max_tokens_pdf = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
78
- temperature_pdf = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
79
- top_p_pdf = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
80
- summary_output = gr.Textbox(label="PDF Summary", interactive=False)
81
- summarize_button = gr.Button("Summarize PDF")
82
-
83
- summarize_button.click(
84
- summarize_pdf,
85
- inputs=[pdf_file, max_tokens_pdf, temperature_pdf, top_p_pdf],
86
- outputs=summary_output
87
- )
88
-
89
- # Tab for Broken Model
90
- with gr.Tab("Broken Answers"):
91
- broken_input = gr.Textbox(label="Enter Text")
92
- broken_max_tokens = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
93
- broken_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
94
- broken_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
95
- broken_output = gr.Textbox(label="Broken Model Output", interactive=False)
96
- broken_button = gr.Button("Generate Broken Answer")
97
-
98
- broken_button.click(
99
- lambda text, max_tokens, temp, top_p: generate_text("Broken Answer (T0pp)", text, max_tokens, temp, top_p),
100
- inputs=[broken_input, broken_max_tokens, broken_temperature, broken_top_p],
101
- outputs=broken_output
102
- )
103
-
104
- demo.launch()
105
-
106
- # Launch the app
107
- launch_custom_app()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
3
  from PyPDF2 import PdfReader
4
 
5
+ # Models and Tokenizers Setup
6
  models = {
7
  "Text Generator (Bloom)": {
8
  "model": AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m"),
 
10
  },
11
  "PDF Summarizer (T5)": {
12
  "model": AutoModelForSeq2SeqLM.from_pretrained("t5-small"),
13
+ "tokenizer": AutoTokenizer.from_pretrained("t5-small", use_fast=False),
14
  },
15
  "Broken Answer (T0pp)": {
16
  "model": AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp"),
17
+ "tokenizer": AutoTokenizer.from_pretrained("bigscience/T0pp", use_fast=False),
18
  },
19
  }
20
 
21
# Chat Function
def chat_with_model(model_choice, user_message, chat_history, file=None):
    """Generate a model response and append it to the chat history.

    Args:
        model_choice: Key into the module-level ``models`` dict.
        user_message: The user's input text.
        chat_history: List of (user, bot) tuples rendered by gr.Chatbot.
        file: Optional uploaded PDF; only consumed by the T5 summarizer.

    Returns:
        The updated chat history list.
    """
    # Defensive: Gradio can hand the handler None before the first exchange;
    # appending to None would raise AttributeError.
    if chat_history is None:
        chat_history = []

    # When summarizing, inline the extracted PDF text into the prompt.
    if model_choice == "PDF Summarizer (T5)" and file is not None:
        pdf_text = extract_text_from_pdf(file)
        user_message += f"\n\nPDF Content:\n{pdf_text}"

    # Ignore empty submissions instead of invoking the model.
    if not user_message.strip():
        return chat_history

    model_info = models[model_choice]
    tokenizer = model_info["tokenizer"]
    model = model_info["model"]

    # Tokenize Input (truncate long prompts to a 512-token window).
    inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # Generate Output
    outputs = model.generate(**inputs, max_length=150, num_beams=5, early_stopping=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Update Chat History
    chat_history.append((user_message, response))
    return chat_history
43
+
44
# Function to Extract Text from PDF
def extract_text_from_pdf(file):
    """Return the newline-joined text of every page in the uploaded PDF.

    Args:
        file: A Gradio file object; ``file.name`` is the path on disk.

    Returns:
        One string with each non-empty page's text on its own line. Pages
        whose extraction yields nothing (e.g. scanned images) are skipped.
    """
    # PdfReader is already imported at module level; the previous local
    # re-import was redundant and has been removed.
    reader = PdfReader(file.name)
    # Extract each page once (the old code called extract_text() twice
    # per page: once for the filter and once for the join).
    page_texts = (page.extract_text() for page in reader.pages)
    return "\n".join(text for text in page_texts if text)
50
+
51
# Interface Setup
def create_chat_interface():
    """Build and return the Gradio Blocks chat UI.

    Layout, in render order: a model selector row, a styled chatbot
    transcript, a message box, an optional PDF upload (visible only while
    the T5 summarizer is selected), and a send button wired to
    ``chat_with_model``.
    """
    # Styling for the transcript area and the page title.
    custom_css = """
    .chatbox {
        background-color: #f7f7f8;
        border-radius: 12px;
        padding: 16px;
        font-family: 'Segoe UI', Tahoma, sans-serif;
    }
    .chat-title {
        font-size: 24px;
        font-weight: bold;
        text-align: center;
        margin-bottom: 12px;
        color: #3a9fd6;
    }
    """
    with gr.Blocks(css=custom_css) as interface:
        gr.Markdown("<div class='chat-title'>GPT-Style Chat Interface</div>")

        with gr.Row():
            model_choice = gr.Dropdown(
                choices=list(models.keys()),
                value="Text Generator (Bloom)",
                label="Select Model",
            )

        chat_history = gr.Chatbot(label="Chat History", elem_classes="chatbox")

        user_message = gr.Textbox(
            placeholder="Type your message here...",
            show_label=False,
            elem_classes="chatbox",
        )

        # Hidden by default; revealed only for the PDF summarizer.
        file_input = gr.File(label="Upload PDF", visible=False, file_types=[".pdf"])

        def sync_pdf_visibility(selected_model):
            # Only the T5 summarizer consumes an uploaded document.
            show_upload = selected_model == "PDF Summarizer (T5)"
            return gr.update(visible=show_upload)

        model_choice.change(fn=sync_pdf_visibility, inputs=model_choice, outputs=file_input)

        send_button = gr.Button("Send")

        # Route submissions through the shared chat handler.
        send_button.click(
            chat_with_model,
            inputs=[model_choice, user_message, chat_history, file_input],
            outputs=chat_history,
        )

    return interface
102
+
103
# Script entry point: build the UI and start the Gradio server.
if __name__ == "__main__":
    create_chat_interface().launch()