samurai9776 committed
Commit acd00d4 · verified · Parent: 0570829

Upload app.py with huggingface_hub

Files changed (1): app.py (+112, -24)
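The commit message above says app.py was pushed programmatically with `huggingface_hub`. For context, a minimal sketch of that kind of upload is shown below; the target repo id and `repo_type="space"` are assumptions for illustration only and are not confirmed by this page.

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Push a local app.py to a Hub repo; repo_id and repo_type are assumed here.
api.upload_file(
    path_or_fileobj="app.py",                   # local file to upload
    path_in_repo="app.py",                      # destination path inside the repo
    repo_id="samurai9776/thought-classifier",   # assumption: the repo shown in this commit
    repo_type="space",                          # assumption: a Gradio Space hosting app.py
    commit_message="Upload app.py with huggingface_hub",
)
```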
app.py CHANGED
@@ -2,11 +2,12 @@ import gradio as gr
 from transformers import pipeline
 import torch
 
-# Load the model
+# Load the model with custom pipeline (includes built-in rules!)
 model_id = "samurai9776/thought-classifier"
 classifier = pipeline(
     "text-classification",
     model=model_id,
+    trust_remote_code=True,  # Required for custom pipeline with rules
     device=0 if torch.cuda.is_available() else -1
 )
 
@@ -14,12 +15,12 @@ def classify_thought(ai_utterance, cx_utterance):
     """Classify if the conversation is complete or incomplete"""
 
     if not ai_utterance or not cx_utterance:
-        return "Please enter both utterances", 0, 0
+        return "Please enter both utterances", 0, 0, ""
 
     # Combine utterances with [SEP] token
     text = f"{ai_utterance} [SEP] {cx_utterance}"
 
-    # Get prediction
+    # Get prediction from pipeline (now includes built-in rules!)
     results = classifier(text)
 
     # Extract scores
@@ -30,20 +31,42 @@ def classify_thought(ai_utterance, cx_utterance):
     # Determine prediction
     prediction = "Complete ✓" if complete_score > incomplete_score else "Incomplete ⚠️"
 
-    return prediction, complete_score, incomplete_score
+    # Check if it's a rule-based decision (very high confidence)
+    if max(complete_score, incomplete_score) >= 0.94:
+        method = "Rule-based detection (linguistic pattern)"
+    else:
+        method = "Neural model prediction"
+
+    return prediction, complete_score, incomplete_score, method
 
 # Create Gradio interface
-with gr.Blocks(title="Thought Completion Classifier", theme=gr.themes.Soft()) as demo:
+with gr.Blocks(
+    title="Thought Completion Classifier - Professional Edition",
+    theme=gr.themes.Soft(),
+    css="""
+    .gradio-container {
+        font-family: 'IBM Plex Sans', sans-serif;
+    }
+    .gr-button {
+        font-size: 16px;
+    }
+    """
+) as demo:
     gr.Markdown("""
     # 🤖 Thought Completion Classifier
+    ### Professional Edition with Linguistic Rules Engine
 
-    This model determines if a conversation between an AI assistant and a customer represents a **complete** or **incomplete** thought.
+    This advanced model determines if a conversation represents a **complete** or **incomplete** thought.
+    It combines linguistic rules with neural networks for superior accuracy.
 
-    Enter both utterances below and click "Classify" to see the results.
+    #### 🎯 Built-in Linguistic Rules:
+    - **Incomplete patterns**: Ends with 'a', 'and', 'with', 'to', 'for', 'of', 'or', 'what', 'get'
+    - **Complete patterns**: Contains 'that's all', 'nothing else', 'yes please', 'perfect'
+    - **Smart fallback**: Uses neural model for complex cases
     """)
 
     with gr.Row():
-        with gr.Column():
+        with gr.Column(scale=1):
             ai_input = gr.Textbox(
                 label="AI Utterance",
                 placeholder="e.g., What else can I get for you?",
@@ -51,40 +74,105 @@ with gr.Blocks(title="Thought Completion Classifier", theme=gr.themes.Soft()) as demo:
             )
             cx_input = gr.Textbox(
                 label="Customer Utterance",
-                placeholder="e.g., That's all for now",
+                placeholder="e.g., I need a",
                 lines=2
             )
 
-            classify_btn = gr.Button("🔍 Classify", variant="primary", size="lg")
+            with gr.Row():
+                classify_btn = gr.Button("🔍 Classify", variant="primary", scale=2)
+                clear_btn = gr.Button("🗑️ Clear", variant="secondary", scale=1)
 
-        with gr.Column():
-            prediction = gr.Textbox(label="Prediction", interactive=False)
+        with gr.Column(scale=1):
+            prediction = gr.Textbox(
+                label="Prediction",
+                interactive=False,
+                elem_classes=["prediction-box"]
+            )
             with gr.Row():
-                complete_score = gr.Number(label="Complete Score", precision=3, interactive=False)
-                incomplete_score = gr.Number(label="Incomplete Score", precision=3, interactive=False)
+                complete_score = gr.Number(
+                    label="Complete Score",
+                    precision=3,
+                    interactive=False
+                )
+                incomplete_score = gr.Number(
+                    label="Incomplete Score",
+                    precision=3,
+                    interactive=False
+                )
+            method = gr.Textbox(
+                label="Detection Method",
+                interactive=False,
+                elem_classes=["method-box"]
+            )
 
-    # Examples
+    # Examples section
+    gr.Markdown("### 📝 Try These Examples:")
     gr.Examples(
         examples=[
-            ["Great. Would you like anything else?", "Picking up on app order."],
-            ["What size would you like?", "Large please"],
-            ["Your total is $15.99", "Actually, let me add"],
-            ["Anything else I can get for you?", "No, that's it"],
-            ["How can I help you today?", "I need to place an order"],
-            ["Will that complete your order?", "Yes, that's everything"],
+            ["What else?", "I need a", "Rule: Ends with 'a' → Incomplete"],
+            ["Anything else?", "And", "Rule: Ends with 'and' → Incomplete"],
+            ["Is that all?", "That's all", "Rule: Contains 'that's all' → Complete"],
+            ["Your order?", "Perfect", "Rule: Contains 'perfect' → Complete"],
+            ["Can I help you?", "I want to", "Rule: Ends with 'to' → Incomplete"],
+            ["What would you like?", "I'll have the burger please", "Model: Complex sentence"],
+            ["Ready to checkout?", "Actually let me add", "Rule: Contains 'let me add' → Incomplete"],
+            ["Anything else today?", "No thanks", "Rule: Contains 'no thanks' → Complete"],
         ],
         inputs=[ai_input, cx_input],
-        outputs=[prediction, complete_score, incomplete_score],
+        outputs=[prediction, complete_score, incomplete_score, method],
         fn=classify_thought,
         cache_examples=True,
+        label="Click any example to test it"
     )
 
+    # Event handlers
     classify_btn.click(
         fn=classify_thought,
         inputs=[ai_input, cx_input],
-        outputs=[prediction, complete_score, incomplete_score]
+        outputs=[prediction, complete_score, incomplete_score, method]
+    )
+
+    clear_btn.click(
+        fn=lambda: ("", "", "", 0, 0, ""),
+        inputs=[],
+        outputs=[ai_input, cx_input, prediction, complete_score, incomplete_score, method]
     )
+
+    # API usage section
+    with gr.Accordion("🔧 API Usage", open=False):
+        gr.Markdown("""
+        ### Use this model in your code:
+        ```python
+        from transformers import pipeline
+
+        # Load the classifier with built-in rules
+        classifier = pipeline(
+            "text-classification",
+            model="samurai9776/thought-classifier",
+            trust_remote_code=True  # Required for rules engine
+        )
+
+        # Example usage
+        result = classifier("What else? [SEP] I need a")
+        print(result)  # [{'label': 'INCOMPLETE', 'score': 0.95}]
+        ```
+
+        ### Model Information:
+        - **Base Model**: DistilBERT (66M parameters)
+        - **Enhancement**: Custom pipeline with linguistic rules
+        - **Training Data**: 900+ labeled conversations
+        - **Accuracy**: 90%+ with rules engine
+        """)
+
+    # Footer
+    gr.Markdown("""
+    ---
+    <div style='text-align: center; color: #666;'>
+    <p>Model by <a href='https://huggingface.co/samurai9776' target='_blank'>samurai9776</a> |
+    <a href='https://huggingface.co/samurai9776/thought-classifier' target='_blank'>View Model</a> |
+    Built with 🤗 Transformers</p>
+    </div>
+    """)
 
-# Launch the app
 if __name__ == "__main__":
     demo.launch()
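For readers skimming the diff, the rule-first / neural-fallback behaviour that the new UI text describes can be approximated outside the custom pipeline. The sketch below is illustrative only: the word lists, the 0.95 rule score, and the COMPLETE/INCOMPLETE label names are taken from strings visible in this diff, while the actual rules engine ships with the model repository and is loaded via `trust_remote_code=True`.

```python
from transformers import pipeline

# Pattern lists quoted in the app's "Built-in Linguistic Rules" section.
INCOMPLETE_ENDINGS = {"a", "and", "with", "to", "for", "of", "or", "what", "get"}
COMPLETE_PHRASES = ("that's all", "nothing else", "yes please", "perfect")

# Neural fallback for utterances the rules do not cover.
neural_classifier = pipeline(
    "text-classification",
    model="samurai9776/thought-classifier",
    trust_remote_code=True,
)

def classify(ai_utterance: str, cx_utterance: str) -> dict:
    """Rule-first classification with a neural fallback (approximation of the app's logic)."""
    cx = cx_utterance.strip().lower()
    words = cx.split()

    # Rule 1: a dangling final word ('a', 'and', 'to', ...) marks the thought incomplete.
    if words and words[-1] in INCOMPLETE_ENDINGS:
        return {"label": "INCOMPLETE", "score": 0.95, "method": "rule"}

    # Rule 2: a closing phrase ("that's all", "perfect", ...) marks it complete.
    if any(phrase in cx for phrase in COMPLETE_PHRASES):
        return {"label": "COMPLETE", "score": 0.95, "method": "rule"}

    # Fallback: defer to the neural model on the combined utterances.
    result = neural_classifier(f"{ai_utterance} [SEP] {cx_utterance}")[0]
    return {**result, "method": "model"}

print(classify("What else?", "I need a"))   # rule hit: ends with 'a' -> INCOMPLETE
print(classify("What would you like?", "I'll have the burger please"))  # model fallback
```

In the Space itself none of this is needed: the pipeline call in the diff already returns rule-adjusted scores, and `classify_thought` only checks the 0.94 threshold to report which path produced the answer.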