File size: 4,965 Bytes
a15fa3d
1b4017a
a15fa3d
6c0de32
 
 
 
 
1b9a999
a2b5272
00403f8
79ce31f
bac4aa5
 
6c0de32
00403f8
79ce31f
 
6c0de32
00403f8
6c0de32
 
1b9a999
6c0de32
 
00403f8
1b9a999
79ce31f
6c0de32
1b9a999
 
6c0de32
1b9a999
79ce31f
00403f8
1b9a999
 
 
 
 
 
 
 
00403f8
1b9a999
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a15fa3d
1b9a999
 
 
 
 
a15fa3d
1b9a999
6c0de32
1b9a999
 
 
 
 
 
 
56f0319
1b9a999
56f0319
 
 
 
 
26ab425
56f0319
 
 
 
 
 
 
 
 
 
74342e2
 
 
 
 
1b9a999
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a15fa3d
1b9a999
 
 
 
 
 
 
 
 
 
6c0de32
00403f8
1b9a999
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # Disable GPU and enforce CPU execution

import gradio as gr
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
import re

# Load the three pre-trained Keras sentiment models from the working directory.
# NOTE(review): load_model raises if an .h5 file is missing — assumes the files ship alongside the app.
gru_model = load_model("best_GRU_tuning_model.h5")
lstm_model = load_model("LSTM_model.h5")
bilstm_model = load_model("BiLSTM_model.h5")

# Restore the tokenizer fitted at training time so inference uses the same vocabulary.
# NOTE(review): pickle.load can execute arbitrary code — only load the trusted local artifact.
with open("my_tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)


def preprocess_text(text):
    """Normalize *text* for tokenization.

    Lowercases the input, drops every character that is not an ASCII
    letter or whitespace, and trims surrounding whitespace.
    """
    lowered = text.lower()
    letters_only = re.sub(r"[^a-zA-Z\s]", "", lowered)
    return letters_only.strip()


def predict_with_gru(text):
    """Predict a 1-based sentiment score for *text* with the GRU model."""
    tokens = tokenizer.texts_to_sequences([preprocess_text(text)])
    padded = pad_sequences(tokens, maxlen=200)
    class_probs = gru_model.predict(padded)
    # argmax gives a 0-based class index; scores are reported starting at 1.
    best_class = int(np.argmax(class_probs, axis=1)[0])
    return best_class + 1


def predict_with_lstm(text):
    """Predict a 1-based sentiment score for *text* with the LSTM model."""
    tokens = tokenizer.texts_to_sequences([preprocess_text(text)])
    padded = pad_sequences(tokens, maxlen=200)
    class_probs = lstm_model.predict(padded)
    # argmax gives a 0-based class index; scores are reported starting at 1.
    best_class = int(np.argmax(class_probs, axis=1)[0])
    return best_class + 1


def predict_with_bilstm(text):
    """Predict a 1-based sentiment score for *text* with the BiLSTM model."""
    tokens = tokenizer.texts_to_sequences([preprocess_text(text)])
    padded = pad_sequences(tokens, maxlen=200)
    class_probs = bilstm_model.predict(padded)
    # argmax gives a 0-based class index; scores are reported starting at 1.
    best_class = int(np.argmax(class_probs, axis=1)[0])
    return best_class + 1

# Unified function for sentiment analysis and statistics
def analyze_sentiment_and_statistics(text):
    """Score *text* with every model and summarize agreement.

    Returns a tuple ``(results, statistics)`` where ``results`` maps a
    model label to its predicted score, and ``statistics`` holds either
    a consensus message (all models agree) or the lowest/highest scores
    with the models that produced them, plus the average in both cases.
    """
    results = {
        "GRU Model": predict_with_gru(text),
        "LSTM Model": predict_with_lstm(text),
        "BiLSTM Model": predict_with_bilstm(text),
    }

    scores = list(results.values())
    low, high = min(scores), max(scores)
    average_score = np.mean(scores)
    lowest_models = [name for name, score in results.items() if score == low]
    highest_models = [name for name, score in results.items() if score == high]

    if len(set(scores)) == 1:
        # Every model produced the same score — no spread to report.
        statistics = {
            "Message": "All models predict the same score.",
            "Average Score": f"{average_score:.2f}",
        }
    else:
        statistics = {
            "Lowest Score": f"{low} (Models: {', '.join(lowest_models)})",
            "Highest Score": f"{high} (Models: {', '.join(highest_models)})",
            "Average Score": f"{average_score:.2f}",
        }
    return results, statistics

# Gradio Interface — builds the page layout, wires the analyze button to the
# models, and starts the local web server.
with gr.Blocks(css=".gradio-container { max-width: 900px; margin: auto; padding: 20px; }") as demo:
    gr.Markdown("# RNN Sentiment Analysis")
    gr.Markdown(
        "Predict the sentiment of your text review using RNN-based models."
    )
    
    # Single multi-line text input for the review.
    with gr.Row():
        text_input = gr.Textbox(
            label="Enter your text here:", 
            lines=3, 
            placeholder="Type your review here..."
        )
    
    # Left column: one read-only score box per model; right column: aggregate stats.
    with gr.Row():
        with gr.Column():
            gru_output = gr.Textbox(label="Predicted Sentiment (GRU Model)", interactive=False)
            lstm_output = gr.Textbox(label="Predicted Sentiment (LSTM Model)", interactive=False)
            bilstm_output = gr.Textbox(label="Predicted Sentiment (BiLSTM Model)", interactive=False)
        
        with gr.Column():
            statistics_output = gr.Textbox(label="Statistics (Lowest, Highest, Average)", interactive=False)

    # Buttons placed together in a row (second line)
    with gr.Row():
        analyze_button = gr.Button("Analyze Sentiment", variant="primary")  # Blue button
        clear_button = gr.ClearButton([text_input, gru_output, lstm_output, bilstm_output, statistics_output])  # Clear button

    # Click handler: run all models and format four display strings, one per
    # output textbox (GRU, LSTM, BiLSTM, statistics).
    def process_input_and_analyze(text_input):
        """Return (gru_str, lstm_str, bilstm_str, stats_str) for the UI."""
        results, statistics = analyze_sentiment_and_statistics(text_input)
        if "Message" in statistics:
            # All models agreed — show the consensus message instead of a spread.
            return (
                f"{results['GRU Model']}",
                f"{results['LSTM Model']}",
                f"{results['BiLSTM Model']}",
                f"Statistics:\n{statistics['Message']}\nAverage Score: {statistics['Average Score']}"
            )
        else:
            return (
                f"{results['GRU Model']}",
                f"{results['LSTM Model']}",
                f"{results['BiLSTM Model']}",
                f"Statistics:\n{statistics['Lowest Score']}\n{statistics['Highest Score']}\nAverage Score: {statistics['Average Score']}"
            )
    
    analyze_button.click(
        process_input_and_analyze,
        inputs=[text_input],
        outputs=[
            gru_output,
            lstm_output, 
            bilstm_output, 
            statistics_output
        ]
    )


# Start the app (blocking call) on Gradio's default local address and port.
demo.launch()