# Toxic-comment classification demo (Gradio app).
import os
# Disable oneDNN custom ops for reproducible TF numerics across machines;
# must be set BEFORE TensorFlow is imported.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import tensorflow as tf
import gradio as gr
import pickle
from keras.utils import pad_sequences
import json
from gradio_client import Client
# Load tokenizer and model
# max_len: sequence length inputs are padded/truncated to — presumably the
# length the model was trained with; confirm against the training pipeline.
max_len = 200
# NOTE(review): unpickling is only safe because tokenizer.pickle ships with
# the app — never pickle.load untrusted files.
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
model = tf.keras.models.load_model('toxic.h5')
# Label order must match the order of the model's output units.
arr = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
# Define the function to score comments
def score_comment(comment):
    """Score a comment and return one "label: True/False" line per category.

    Args:
        comment: Raw comment text to classify.

    Returns:
        Newline-separated string with one line per toxicity label; the value
        is True when the model's output for that label exceeds 0.5.
    """
    sequences = tokenizer.texts_to_sequences([comment])
    inp = pad_sequences(sequences, maxlen=max_len)
    results = model.predict(inp)
    # zip pairs each label with its score (replaces the index loop); join
    # avoids quadratic += concatenation; bool() strips the numpy scalar type
    # (renders identically as "True"/"False").
    return ''.join(
        '{}: {}\n'.format(label, bool(score > 0.5))
        for label, score in zip(arr, results[0])
    )
# Define API function
def predict_api(comment):
    """Score a comment and return a JSON-serializable {label: bool} mapping.

    Args:
        comment: Raw comment text to classify.

    Returns:
        Dict mapping each label in ``arr`` to a plain Python bool (True when
        the model's output exceeds 0.5).

    Note:
        The original returned numpy.bool_ values, which json.dumps cannot
        serialize (TypeError downstream in api_route); bool() fixes that.
    """
    sequences = tokenizer.texts_to_sequences([comment])
    inp = pad_sequences(sequences, maxlen=max_len)
    results = model.predict(inp)
    return {label: bool(score > 0.5) for label, score in zip(arr, results[0])}
# Gradio Interface setup
# Two-line textbox for the comment; a plain textbox shows the formatted scores.
inputs = gr.Textbox(lines=2, placeholder='Enter comment here...')
outputs = gr.Textbox()
# Create Gradio interface
# score_comment is the handler invoked for each UI submission.
interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)
# API endpoint function (exposing the model via Gradio API)
# NOTE(review): this function is never registered with any web framework in
# this file — confirm a caller wires it up, otherwise it is dead code.
def api_route(request):
    """Handle a JSON POST of the form {"comment": "..."} and return scores.

    Args:
        request: Flask-style request object exposing ``method`` and
            ``get_json()``.

    Returns:
        A JSON string with the prediction on success, or a
        (JSON error string, HTTP status) tuple on bad input or wrong method.
    """
    if request.method != 'POST':
        # Guard clause: the original fell through and implicitly returned
        # None for non-POST requests; respond explicitly instead.
        return json.dumps({"error": "Method not allowed"}), 405
    # get_json() can return None for a non-JSON body; avoid AttributeError.
    data = request.get_json() or {}
    comment = data.get('comment')
    if not comment:
        return json.dumps({"error": "Missing 'comment' in request"}), 400
    prediction = predict_api(comment)
    # Coerce any numpy.bool_ values to plain bools so json.dumps succeeds.
    prediction = {k: bool(v) for k, v in prediction.items()}
    return json.dumps({"prediction": prediction})
# Launch the Gradio interface for the space (auto-shared link)
# share=True requests a temporary public gradio.live URL — NOTE(review): on a
# hosted Space this flag is typically ignored; presumably intended for local
# runs, verify.
interface.launch(share=True)