art-manuh committed on
Commit
4155848
·
verified ·
1 Parent(s): bef2658

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -7
app.py CHANGED
@@ -3,21 +3,21 @@ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
3
 
4
  import tensorflow as tf
5
  import gradio as gr
6
- import pandas as pd
7
  import pickle
8
  from keras.utils import pad_sequences
 
 
9
 
 
10
  max_len = 200
11
-
12
- # Load the tokenizer
13
  with open('tokenizer.pickle', 'rb') as handle:
14
  tokenizer = pickle.load(handle)
15
 
16
- # Load the pre-trained model
17
  model = tf.keras.models.load_model('toxic.h5')
18
 
19
  arr = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
20
 
 
21
  def score_comment(comment):
22
  sequences = tokenizer.texts_to_sequences([comment])
23
  inp = pad_sequences(sequences, maxlen=max_len)
@@ -27,11 +27,30 @@ def score_comment(comment):
27
  text += '{}: {}\n'.format(arr[i], results[0][i] > 0.5)
28
  return text
29
 
30
- # Update the Gradio interface to use the latest syntax
31
- inputs = gr.Textbox(lines=2, placeholder='Comment to score')
 
 
 
 
 
 
 
 
32
  outputs = gr.Textbox()
33
 
 
34
  interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)
35
 
36
- # Launch the interface
 
 
 
 
 
 
 
 
 
 
37
  interface.launch(share=True)
 
3
 
4
  import tensorflow as tf
5
  import gradio as gr
 
6
  import pickle
7
  from keras.utils import pad_sequences
8
+ import json
9
+ from gradio_client import Client
10
 
11
# Model assets: fitted tokenizer + trained Keras toxicity classifier.
max_len = 200  # sequence padding length; must match what the model was trained with

# Deserialize the tokenizer fitted at training time.
# NOTE(review): pickle.load is unsafe on untrusted files; acceptable for a
# repo-local artifact, but worth confirming the file's provenance.
with open('tokenizer.pickle', 'rb') as fh:
    tokenizer = pickle.load(fh)

model = tf.keras.models.load_model('toxic.h5')

# Output labels, in the same order as the model's output vector — TODO confirm
# this ordering matches the training label columns.
arr = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
19
 
20
+ # Define the function to score comments
21
  def score_comment(comment):
22
  sequences = tokenizer.texts_to_sequences([comment])
23
  inp = pad_sequences(sequences, maxlen=max_len)
 
27
  text += '{}: {}\n'.format(arr[i], results[0][i] > 0.5)
28
  return text
29
 
30
# Define API function
def predict_api(comment):
    """Score a comment against each toxicity label.

    Parameters
    ----------
    comment : str
        Raw comment text to classify.

    Returns
    -------
    dict
        Maps each label in ``arr`` to a plain ``bool`` — True when the
        model's probability for that label exceeds 0.5.
    """
    sequences = tokenizer.texts_to_sequences([comment])
    inp = pad_sequences(sequences, maxlen=max_len)
    results = model.predict(inp)
    # Coerce numpy bool_ to plain bool: json.dumps raises TypeError on numpy
    # scalar types, and api_route serializes this dict with json.dumps.
    return {label: bool(results[0][i] > 0.5) for i, label in enumerate(arr)}
37
+
38
# Gradio UI: free-text comment in, formatted per-label report out.
inputs = gr.Textbox(placeholder='Enter comment here...', lines=2)
outputs = gr.Textbox()

# Text-in/text-out demo backed by score_comment.
interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)
44
 
45
# API endpoint function (exposing the model via Gradio API)
def api_route(request):
    """Flask-style JSON handler that scores a comment via ``predict_api``.

    NOTE(review): this handler is never registered with any web framework in
    this file — Gradio does not call it. Confirm it is wired up elsewhere, or
    remove it as dead code.

    Parameters
    ----------
    request : object
        Request-like object exposing ``method`` and ``get_json()``.

    Returns
    -------
    tuple[str, int] | str | None
        ``(error_json, 400)`` when ``'comment'`` is missing, a JSON string
        with the prediction on success, and ``None`` for non-POST requests
        (original behavior, preserved).
    """
    if request.method == 'POST':
        # get_json() may return None for an empty/absent body; guard before .get
        data = request.get_json() or {}
        comment = data.get('comment')
        if not comment:
            return json.dumps({"error": "Missing 'comment' in request"}), 400
        # Coerce values to plain bool — numpy bool_ is not JSON-serializable
        # and would make json.dumps raise TypeError.
        prediction = {label: bool(flag)
                      for label, flag in predict_api(comment).items()}
        return json.dumps({"prediction": prediction})
54
+
55
# Launch the Gradio interface for the space (auto-shared link).
# share=True requests a public *.gradio.live tunnel; NOTE(review): Hugging
# Face Spaces serves the app directly and ignores share — confirm whether
# the flag is still wanted.
interface.launch(share=True)