File size: 2,947 Bytes
56e08cb
e8b622f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import PyPDF2

# Model identifiers (Hugging Face Hub repo ids).
model1_name = "t5-small"  # Model 1: general text2text (summarization, translation, ...)
model2_name = "codeparrot/codeparrot-small"  # Model 2: causal LM trained on code
# FIX: "Salesforce/blip-image-captioning" is not a published repo id; the BLIP
# captioning checkpoints are "-base" and "-large". The bare id makes
# pipeline() fail at startup with a model-not-found error.
model3_name = "Salesforce/blip-image-captioning-base"  # Model 3

# Instantiate inference pipelines once at import time so each request
# reuses the already-loaded weights.
model1 = pipeline("text2text-generation", model=model1_name, tokenizer=model1_name)
model2 = pipeline("text-generation", model=model2_name, tokenizer=model2_name)
model3 = pipeline("image-to-text", model=model3_name)  # We'll adapt this for PDF processing

# Helper: Extract text from PDF
def extract_text_from_pdf(pdf_file):
    """Return the concatenated text of every page in *pdf_file*.

    *pdf_file* is anything ``PyPDF2.PdfReader`` accepts (a path or a
    binary file-like object, e.g. the upload Gradio hands us).

    Returns an empty string when the document has no extractable text
    (e.g. a scanned/image-only PDF).
    """
    pdf_reader = PyPDF2.PdfReader(pdf_file)
    # FIX: extract_text() returns None for pages without a text layer;
    # the original ``text += page.extract_text()`` raised TypeError there.
    # ``or ""`` substitutes an empty string, and join avoids quadratic +=.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)

# Function for Model 1
def model1_func(input_text):
    """Run the text2text pipeline (model 1) on *input_text*.

    Returns a labelled output string, or an ``Error: ...`` string if the
    pipeline raises (broad catch is intentional: this feeds a UI textbox).
    """
    try:
        generated = model1(input_text, max_length=50, num_return_sequences=1)
        first = generated[0]["generated_text"]
    except Exception as e:
        return f"Error: {str(e)}"
    return f"Model 1 Output: {first}"

# Function for Model 2
def model2_func(input_text):
    """Run the text-generation pipeline (model 2) on *input_text*.

    Returns a labelled output string, or an ``Error: ...`` string if the
    pipeline raises (broad catch is intentional: this feeds a UI textbox).
    """
    try:
        completions = model2(input_text, max_length=50, num_return_sequences=1)
        first = completions[0]["generated_text"]
    except Exception as e:
        return f"Error: {str(e)}"
    return f"Model 2 Output: {first}"

# Function for Model 3
def model3_func(pdf_file):
    """Extract text from an uploaded PDF and run it through model 3.

    Returns a labelled output string, a "no text found" message for
    empty/scanned PDFs, or an ``Error: ...`` string on any failure.
    """
    try:
        contents = extract_text_from_pdf(pdf_file)
        if not contents.strip():
            return "No text found in the PDF. Please upload a valid file."
        # NOTE(review): model3 is an "image-to-text" pipeline; passing it a
        # plain string will most likely raise here (and surface as the
        # Error message below) — confirm the intended model/task for PDFs.
        outputs = model3(contents)
        caption = outputs[0]["generated_text"]
    except Exception as e:
        return f"Error: {str(e)}"
    return f"Model 3 Output: {caption}"

# Gradio Interface: one tab per model, each with its own input, output
# and trigger button wired to the corresponding handler above.
with gr.Blocks() as demo:
    gr.Markdown("<h1>Multi-Model NLP Tool</h1>")

    with gr.Tab("Model 1"):
        gr.Markdown("**Model 1: Text-to-Text (e.g., Summarization)**")
        text_in_1 = gr.Textbox(label="Enter Text", placeholder="Type here...")
        text_out_1 = gr.Textbox(label="Output")
        run_1 = gr.Button("Generate")
        run_1.click(model1_func, inputs=text_in_1, outputs=text_out_1)

    with gr.Tab("Model 2"):
        gr.Markdown("**Model 2: Text Generation (e.g., Code Generation)**")
        text_in_2 = gr.Textbox(label="Enter Text", placeholder="Type here...")
        text_out_2 = gr.Textbox(label="Output")
        run_2 = gr.Button("Generate")
        run_2.click(model2_func, inputs=text_in_2, outputs=text_out_2)

    with gr.Tab("Model 3"):
        gr.Markdown("**Model 3: Document Reader (PDF Input)**")
        pdf_in = gr.File(label="Upload PDF")
        text_out_3 = gr.Textbox(label="Output")
        run_3 = gr.Button("Process PDF")
        run_3.click(model3_func, inputs=pdf_in, outputs=text_out_3)

# Start the local Gradio server (blocks until interrupted).
demo.launch()