luguog committed on
Commit
7c1863e
·
verified ·
1 Parent(s): b49a1a2

Delete App.py

Browse files
Files changed (1) hide show
  1. App.py +0 -764
App.py DELETED
@@ -1,764 +0,0 @@
1
# Standard library
import datetime
import json
import random
import re
import time
from typing import Dict, List, Tuple, Optional

# Third-party UI framework
# Bug fix: the original line read "Or rate import gradio as gr" — stray
# pasted text that made the whole module a SyntaxError.
import gradio as gr
8
-
9
# JavaScript injected into the Gradio page. It loads Transformers.js models
# in the *browser* (speech-to-text + text-to-speech) so no server-side API
# keys or GPU are needed, and it wires the transcript into the hidden Gradio
# textbox (#hidden-transcript) that drives the Python state machine.
#
# Bug fixes vs. the previous version:
#   * There were two 'DOMContentLoaded' listeners, both calling loadModels(),
#     so every model was downloaded and initialized twice. Only the listener
#     that also installs the response observer is kept.
#   * The JS string 'Hello! I\'m ...' was written with a single backslash in
#     a non-raw Python string; Python collapsed \' to ', emitting invalid
#     JavaScript. It is now escaped as \\' so the browser receives \'.
#
# NOTE(review): the CDN specifiers below contain "[email protected]", which looks
# like an email-obfuscation artifact of "@xenova/transformers@<version>" —
# confirm and restore the real package@version before deploying.
transformers_js_code = """
<script src="https://cdn.jsdelivr.net/npm/@xenova/[email protected]"></script>
<script>
// Initialize Transformers.js pipelines
let sttPipeline = null;
let ttsPipeline = null;
let speakerEmbeddings = null;

// Load models asynchronously
async function loadModels() {
    try {
        // Show loading status
        document.getElementById("status-text").innerText = "Loading speech models...";

        // Load speech-to-text model
        const { pipeline } = await import('https://cdn.jsdelivr.net/npm/@xenova/[email protected]');
        sttPipeline = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny');

        // Load text-to-speech model and embeddings
        ttsPipeline = await pipeline('text-to-speech', 'Xenova/speecht5_tts');
        const { loadSpeakerEmbeddings } = await import('https://cdn.jsdelivr.net/npm/@xenova/[email protected]');
        speakerEmbeddings = await loadSpeakerEmbeddings('Xenova/speecht5_vc_pt_sd_epoch_1000');

        console.log('All models loaded successfully');
        document.getElementById("status-text").innerText = "Ready to listen";
    } catch (error) {
        console.error('Error loading models:', error);
        document.getElementById("status-text").innerText = "Error loading models";
    }
}

// Transcribe audio using Transformers.js
async function transcribeAudio(audioBlob) {
    if (!sttPipeline) {
        console.error('Speech-to-text model not loaded');
        return "Speech recognition not ready";
    }

    try {
        document.getElementById("status-text").innerText = "Processing speech...";
        const output = await sttPipeline(audioBlob, {
            language: 'english',
            task: 'transcribe',
        });
        document.getElementById("status-text").innerText = "Ready to listen";
        return output.text;
    } catch (error) {
        console.error('Transcription error:', error);
        document.getElementById("status-text").innerText = "Ready to listen";
        return "I couldn't understand that";
    }
}

// Generate speech using Transformers.js
async function generateSpeech(text) {
    if (!ttsPipeline || !speakerEmbeddings) {
        console.error('Text-to-speech model not loaded');
        return null;
    }

    try {
        const audio = await ttsPipeline(text, {
            speaker_embeddings: speakerEmbeddings,
        });
        return URL.createObjectURL(audio);
    } catch (error) {
        console.error('Speech generation error:', error);
        return null;
    }
}

// Function to handle audio recording and transcription
async function handleAudioRecording(audioBlob) {
    const transcript = await transcribeAudio(audioBlob);
    if (transcript && transcript !== "I couldn't understand that") {
        // Send the transcript to Gradio
        const hiddenTextbox = document.querySelector('#hidden-transcript textarea');
        if (hiddenTextbox) {
            hiddenTextbox.value = transcript;
            hiddenTextbox.dispatchEvent(new Event('input', { bubbles: true }));
            hiddenTextbox.dispatchEvent(new Event('change', { bubbles: true }));
        }
    }
}

// Function to speak text
async function speakText(text) {
    const audioUrl = await generateSpeech(text);
    if (audioUrl) {
        const audio = new Audio(audioUrl);
        audio.play();
    }
}

// Add event listener for when the bot response updates
function setupResponseObserver() {
    const targetNode = document.querySelector('#ai-response textarea');
    if (targetNode) {
        const config = { characterData: true, childList: true, subtree: true };
        const callback = function(mutationsList, observer) {
            for (const mutation of mutationsList) {
                if (mutation.type === 'childList' || mutation.type === 'characterData') {
                    const responseText = targetNode.value;
                    if (responseText && !responseText.includes('Hello! I\\'m your AppleCare concierge')) {
                        speakText(responseText);
                    }
                }
            }
        };
        const observer = new MutationObserver(callback);
        observer.observe(targetNode, config);
    }
}

// Initialize models once when the page loads (previously registered twice,
// which downloaded every model twice), then attach the response observer
// after Gradio has rendered.
window.addEventListener('DOMContentLoaded', () => {
    loadModels();
    setTimeout(setupResponseObserver, 2000); // Wait for Gradio to render
});
</script>
"""
137
-
138
class AppleCareVoiceConcierge:
    """Rule-based conversation engine for booking Apple repair appointments.

    The conversation is a linear state machine stored in
    ``self.conversation_state["step"]``:

        greeting -> issue_identification -> location_gathering
        -> imei_gathering -> confirmation -> completed

    All store locations and repair prices are hard-coded demo data; nothing
    here contacts a real backend service.
    """

    def __init__(self):
        # Build the initial conversation state through the same helper used
        # for mid-conversation resets, so the initial and the reset state can
        # never drift apart (previously the dict literal was duplicated).
        self.reset_conversation()

        # Demo Apple Store database keyed by ZIP code.
        self.stores = {
            "10001": {"name": "Apple Fifth Avenue", "address": "767 5th Ave, New York, NY 10153", "phone": "(212) 336-1440"},
            "10029": {"name": "Apple Upper East Side", "address": "940 Madison Ave, New York, NY 10075", "phone": "(212) 284-1800"},
            "90210": {"name": "Apple Beverly Hills", "address": "444 N Rodeo Dr, Beverly Hills, CA 90210", "phone": "(310) 273-3000"},
            "94102": {"name": "Apple Union Square", "address": "300 Post St, San Francisco, CA 94108", "phone": "(415) 392-0202"},
            "60611": {"name": "Apple Michigan Avenue", "address": "401 N Michigan Ave, Chicago, IL 60611", "phone": "(312) 981-4104"},
            "75201": {"name": "Apple Northpark Center", "address": "8687 N Central Expy, Dallas, TX 75225", "phone": "(214) 965-0960"},
            "02116": {"name": "Apple Boylston Street", "address": "815 Boylston St, Boston, MA 02116", "phone": "(617) 385-9400"},
            "98101": {"name": "Apple University Village", "address": "4742 42nd Ave NE, Seattle, WA 98105", "phone": "(206) 892-0076"},
            "33139": {"name": "Apple Lincoln Road", "address": "1021 Lincoln Rd, Miami Beach, FL 33139", "phone": "(305) 421-0200"},
            "30309": {"name": "Apple Lenox Square", "address": "3393 Peachtree Rd NE, Atlanta, GA 30326", "phone": "(404) 816-9500"}
        }

        # Common city names / abbreviations mapped to a representative ZIP
        # code that exists in ``self.stores``.
        self.city_mappings = {
            "new york": "10001",
            "nyc": "10001",
            "manhattan": "10001",
            "los angeles": "90210",
            "la": "90210",
            "beverly hills": "90210",
            "san francisco": "94102",
            "sf": "94102",
            "chicago": "60611",
            "dallas": "75201",
            "boston": "02116",
            "seattle": "98101",
            "miami": "33139",
            "atlanta": "30309"
        }

        # Flat-rate repair estimates per device type and issue category.
        self.repair_costs = {
            "iphone": {
                "screen": "$279",
                "battery": "$89",
                "camera": "$149",
                "water": "$99 diagnostic + repair cost",
                "speaker": "$169",
                "charging": "$99"
            },
            "ipad": {
                "screen": "$399",
                "battery": "$129",
                "camera": "$199",
                "water": "$149 diagnostic + repair cost",
                "speaker": "$149",
                "charging": "$149"
            },
            "mac": {
                "screen": "$599",
                "battery": "$199",
                "keyboard": "$249",
                "trackpad": "$179",
                "water": "$299 diagnostic + repair cost"
            },
            "watch": {
                "screen": "$249",
                "battery": "$79",
                "water": "$229 service exchange"
            }
        }

    def reset_conversation(self) -> None:
        """Reset all conversation state back to the initial greeting step."""
        self.conversation_state = {
            "step": "greeting",
            "device": None,
            "issue": None,
            "location": None,      # reserved; not populated by the current flow
            "imei": None,
            "user_name": None,     # reserved; not populated by the current flow
            "estimated_cost": None,
            "nearest_store": None,
            "history": []
        }

    @staticmethod
    def _device_display_name(device: str) -> str:
        """Map an internal device key to its user-facing marketing name.

        Centralizes the branding logic that was previously copy-pasted in
        four places inside ``process_conversation``, and fixes the "Ipad"
        capitalization that ``str.title()`` produced.
        """
        special = {"iphone": "iPhone", "ipad": "iPad", "mac": "Mac"}
        return special.get(device, device.title())

    def extract_device_info(self, text: str) -> Optional[str]:
        """Return the device key ("iphone"/"ipad"/"mac"/"watch") mentioned in
        *text*, or None when no known device keyword is found.

        Checks are ordered, so e.g. "phone" wins over "watch" if both appear.
        """
        text_lower = text.lower()

        if any(word in text_lower for word in ["iphone", "phone"]):
            return "iphone"
        elif any(word in text_lower for word in ["ipad", "tablet"]):
            return "ipad"
        elif any(word in text_lower for word in ["mac", "macbook", "laptop", "computer", "imac"]):
            return "mac"
        elif any(word in text_lower for word in ["watch", "apple watch"]):
            return "watch"
        return None

    def extract_issue_info(self, text: str, device: str) -> Tuple[Optional[str], Optional[str]]:
        """Return ``(issue_type, cost_string)`` for the first issue whose
        keywords appear in *text* AND that has a price for *device*.

        Returns ``(None, None)`` when nothing matches. Note: a keyword match
        for an issue the device has no price for is skipped, not returned.
        """
        text_lower = text.lower()

        issue_keywords = {
            "screen": ["screen", "crack", "broken", "display", "shatter"],
            "battery": ["battery", "charge", "power", "drain", "dead"],
            "camera": ["camera", "photo", "lens", "focus"],
            "water": ["water", "wet", "liquid", "rain", "drop", "spill"],
            "speaker": ["speaker", "sound", "audio", "volume"],
            "charging": ["charging", "port", "cable", "connector"],
            "keyboard": ["keyboard", "key", "typing"],
            "trackpad": ["trackpad", "mouse", "cursor", "click"]
        }

        for issue_type, keywords in issue_keywords.items():
            if any(keyword in text_lower for keyword in keywords):
                if device in self.repair_costs and issue_type in self.repair_costs[device]:
                    return issue_type, self.repair_costs[device][issue_type]

        return None, None

    def extract_location_info(self, text: str) -> Optional[Dict]:
        """Return the store record for a ZIP code or city found in *text*.

        Any 5-digit ZIP not in the demo database deliberately falls back to
        the NYC store; returns None only when neither a ZIP nor a known city
        name is present.
        """
        text_lower = text.lower()

        # Check for ZIP codes first (most specific signal).
        zip_match = re.search(r'\b\d{5}\b', text)
        if zip_match:
            zip_code = zip_match.group()
            if zip_code in self.stores:
                return self.stores[zip_code]
            else:
                return self.stores["10001"]  # Default to NYC

        # Fall back to city-name matching.
        for city, zip_code in self.city_mappings.items():
            if city in text_lower:
                return self.stores[zip_code]

        return None

    def extract_imei(self, text: str) -> Optional[str]:
        """Return the first 10+ character alphanumeric run in *text* (spaces
        removed first, so "ABCD 1234 567890" is accepted), else None."""
        imei_match = re.search(r'\b[A-Za-z0-9]{10,}\b', text.replace(" ", ""))
        if imei_match:
            return imei_match.group()
        return None

    def generate_appointment_details(self) -> Dict:
        """Generate demo appointment details for the next business day.

        Returns a dict with formatted "date", "time" (random slot) and a
        random "confirmation" code of the form AC + 6 alphanumerics.
        """
        tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
        # Saturday (5) advances 2 days, Sunday (6) advances 1 — both land on
        # Monday.
        if tomorrow.weekday() >= 5:
            tomorrow += datetime.timedelta(days=(7 - tomorrow.weekday()))

        time_slots = ["9:00 AM", "10:30 AM", "12:00 PM", "1:30 PM", "3:00 PM", "4:30 PM"]
        selected_time = random.choice(time_slots)

        confirmation = "AC" + "".join(random.choices("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ", k=6))

        return {
            "date": tomorrow.strftime("%A, %B %d, %Y"),
            "time": selected_time,
            "confirmation": confirmation
        }

    def process_conversation(self, user_input: str) -> str:
        """Advance the state machine one turn and return the assistant reply.

        Appends both the user input and the generated response to
        ``conversation_state["history"]`` (except for blank input, which is
        rejected without touching state).
        """
        if not user_input.strip():
            return "I didn't catch that. Could you please speak again?"

        self.conversation_state["history"].append({"role": "user", "content": user_input})

        current_step = self.conversation_state["step"]

        if current_step == "greeting":
            device = self.extract_device_info(user_input)
            if device:
                self.conversation_state["device"] = device
                self.conversation_state["step"] = "issue_identification"
                device_name = self._device_display_name(device)
                response = f"I can help with your {device_name}. What seems to be the problem? For example, is the screen damaged, battery issues, or something else?"
            else:
                response = "I can help with iPhone, iPad, Mac, or Apple Watch repairs. Which device needs assistance today?"

        elif current_step == "issue_identification":
            issue_type, cost = self.extract_issue_info(user_input, self.conversation_state["device"])
            if issue_type and cost:
                self.conversation_state["issue"] = issue_type
                self.conversation_state["estimated_cost"] = cost
                self.conversation_state["step"] = "location_gathering"
                device_name = self._device_display_name(self.conversation_state["device"])
                response = f"I understand you need {issue_type} repair for your {device_name}. The estimated cost is {cost}. To find the nearest Apple Store, could you tell me your ZIP code or city?"
            else:
                response = "Could you describe the issue in more detail? For example, is it a cracked screen, battery problem, water damage, or something else?"

        elif current_step == "location_gathering":
            store_info = self.extract_location_info(user_input)
            if store_info:
                self.conversation_state["nearest_store"] = store_info
                self.conversation_state["step"] = "imei_gathering"
                response = f"Perfect! The nearest Apple Store is {store_info['name']} at {store_info['address']}. For the appointment, I'll need your device's IMEI or serial number. You can find this in Settings > General > About, or you can say 'skip' if you don't have it available."
            else:
                response = "I need your location to find the nearest Apple Store. Could you please provide your ZIP code or city name?"

        elif current_step == "imei_gathering":
            if "skip" in user_input.lower():
                self.conversation_state["imei"] = "Will verify at appointment"
            else:
                imei = self.extract_imei(user_input)
                self.conversation_state["imei"] = imei if imei else "Will verify at appointment"

            self.conversation_state["step"] = "confirmation"
            device_name = self._device_display_name(self.conversation_state["device"])

            response = f"""Let me confirm your repair appointment:

📱 Device: {device_name}
🔧 Issue: {self.conversation_state["issue"].title()} repair
💰 Estimated Cost: {self.conversation_state["estimated_cost"]}
🏪 Location: {self.conversation_state["nearest_store"]["name"]}
📍 Address: {self.conversation_state["nearest_store"]["address"]}
📱 IMEI: {self.conversation_state["imei"]}

Should I proceed with booking this appointment? Say 'yes' to confirm or 'no' to start over."""

        elif current_step == "confirmation":
            # NOTE: substring matching means e.g. "know" contains "no"; the
            # affirmative check runs first, matching the original behavior.
            if any(word in user_input.lower() for word in ["yes", "confirm", "book", "schedule", "proceed"]):
                appointment = self.generate_appointment_details()
                self.conversation_state["step"] = "completed"

                device_name = self._device_display_name(self.conversation_state["device"])

                response = f"""✅ Appointment Successfully Booked!

🎫 Confirmation Number: {appointment["confirmation"]}
📅 Date: {appointment["date"]}
🕐 Time: {appointment["time"]}
🏪 Store: {self.conversation_state["nearest_store"]["name"]}
📞 Store Phone: {self.conversation_state["nearest_store"]["phone"]}
📍 Address: {self.conversation_state["nearest_store"]["address"]}

📝 What to bring:
• Government-issued photo ID
• Your {device_name}
• Proof of purchase (if available)

⚠️ Before your appointment:
• Back up your device
• Turn off Find My iPhone (if applicable)
• Remove any cases or screen protectors

Your repair is scheduled! A confirmation has been sent to your Apple ID email. Is there anything else I can help you with today?"""
            elif any(word in user_input.lower() for word in ["no", "cancel", "start over"]):
                self.reset_conversation()
                response = "No problem! Let's start fresh. What device needs repair today?"
            else:
                response = "I need you to confirm the appointment. Please say 'yes' to proceed with booking or 'no' to start over."

        elif current_step == "completed":
            if any(word in user_input.lower() for word in ["thank", "thanks", "bye", "goodbye"]):
                response = "You're very welcome! Have a great day, and we'll see you at your appointment. If you need to reschedule or have questions, you can call the store directly or visit support.apple.com."
            elif any(word in user_input.lower() for word in ["new", "another", "different", "help"]):
                self.reset_conversation()
                response = "I'd be happy to help with another repair. What device needs assistance today?"
            else:
                response = "Your appointment is all set! Is there anything else I can help you with today, or would you like to schedule another repair?"

        else:
            # Defensive fallback: unknown step (should be unreachable).
            # Previously ``response`` was left unbound here, which would have
            # raised UnboundLocalError; restart the conversation instead.
            self.reset_conversation()
            response = "I can help with iPhone, iPad, Mac, or Apple Watch repairs. Which device needs assistance today?"

        self.conversation_state["history"].append({"role": "assistant", "content": response})

        return response
439
-
440
# Module-level singleton shared by every Gradio callback below. Conversation
# state therefore persists across requests — and is shared between concurrent
# users in a multi-user deployment. NOTE(review): per-session state (e.g.
# gr.State) may have been intended; confirm.
concierge = AppleCareVoiceConcierge()
442
-
443
def process_transcribed_text(transcribed_text):
    """Handle text transcribed from browser-side speech recognition.

    Runs the concierge state machine on *transcribed_text* and returns a
    ``(response, formatted_history)`` pair of strings for the two output
    textboxes.
    """
    def _format_history():
        # Render the last 10 turns as a readable transcript string.
        lines = []
        for msg in concierge.conversation_state["history"][-10:]:
            role = "🗣️ You" if msg["role"] == "user" else "🤖 Concierge"
            lines.append(f"{role}: {msg['content']}")
        return "\n\n".join(lines)

    if not transcribed_text.strip():
        # Bug fix: this branch previously returned the raw history list (of
        # dicts) as the second value, while the success path returns a joined
        # string — the history textbox now always receives a string.
        return "I didn't catch that. Please try speaking again.", _format_history()

    response = concierge.process_conversation(transcribed_text)

    return response, _format_history()
457
-
458
def text_input_handler(text_input):
    """Handle typed input (used for testing without a microphone).

    Returns a ``(response, formatted_history)`` pair of strings for the two
    output textboxes.
    """
    def _format_history():
        # Render the last 10 turns as a readable transcript string.
        lines = []
        for msg in concierge.conversation_state["history"][-10:]:
            role = "🗣️ You" if msg["role"] == "user" else "🤖 Concierge"
            lines.append(f"{role}: {msg['content']}")
        return "\n\n".join(lines)

    if not text_input.strip():
        # Bug fix: this branch previously returned the raw history list (of
        # dicts) as the second value, while the success path returns a joined
        # string — the history textbox now always receives a string.
        return "Please enter a message.", _format_history()

    response = concierge.process_conversation(text_input)

    return response, _format_history()
472
-
473
def reset_conversation():
    """Clear the shared concierge state and return the initial greeting plus
    an empty history string for the two output textboxes."""
    concierge.reset_conversation()
    greeting = "Conversation reset! Hello! I'm your AppleCare concierge. How can I help with your device today?"
    return greeting, ""
477
-
478
# Create Gradio interface with mystical orange and dark blue neomorphic design
def create_interface():
    """Build and return the Gradio Blocks UI.

    Layout: injected Transformers.js snippet, title banner, then a styled
    column containing a status indicator, microphone input, a hidden textbox
    that browser JavaScript fills with the transcript, a typed-input box,
    action buttons, the concierge response box, and a scrolling history.
    All speech processing happens client-side in the injected JavaScript.
    """
    with gr.Blocks(
        # Custom neomorphic dark-blue/orange theme applied via raw CSS.
        css="""
        .gradio-container {
            max-width: 900px !important;
            margin: 0 auto !important;
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
        }
        .main-container {
            background: linear-gradient(145deg, #1a1f35, #0d1226);
            border-radius: 24px;
            box-shadow: 20px 20px 60px #0a0e1d, -20px -20px 60px #20284d;
            padding: 30px;
            margin: 20px auto;
            border: 1px solid #ff7b25;
        }
        .title {
            text-align: center;
            color: #ff7b25;
            margin-bottom: 25px;
            font-weight: 800;
            font-size: 32px;
            text-shadow: 0 0 10px rgba(255, 123, 37, 0.5);
        }
        .subtitle {
            text-align: center;
            color: #64b5f6;
            margin-bottom: 30px;
            font-size: 18px;
        }
        .response-box {
            background: linear-gradient(145deg, #1e243b, #151a30);
            border-radius: 20px;
            box-shadow: inset 5px 5px 10px #0d111f, inset -5px -5px 10px #272f57;
            padding: 20px;
            margin: 15px 0;
            border: none;
            min-height: 200px;
            color: #ff7b25;
            font-weight: 500;
        }
        .conversation-history {
            background: linear-gradient(145deg, #1e243b, #151a30);
            border-radius: 20px;
            box-shadow: inset 5px 5px 10px #0d111f, inset -5px -5px 10px #272f57;
            padding: 20px;
            margin: 15px 0;
            border: none;
            min-height: 300px;
            max-height: 400px;
            overflow-y: auto;
            color: #64b5f6;
        }
        .btn-primary {
            background: linear-gradient(145deg, #ff7b25, #e55a00);
            border: none;
            border-radius: 16px;
            box-shadow: 5px 5px 10px #0d111f, -5px -5px 10px #272f57;
            color: #0d1226;
            padding: 12px 25px;
            margin: 8px;
            transition: all 0.3s ease;
            font-weight: 600;
        }
        .btn-primary:hover {
            box-shadow: 3px 3px 6px #0d111f, -3px -3px 6px #272f57;
            transform: translateY(2px);
            background: linear-gradient(145deg, #e55a00, #ff7b25);
        }
        .btn-secondary {
            background: linear-gradient(145deg, #64b5f6, #2196f3);
            border: none;
            border-radius: 16px;
            box-shadow: 5px 5px 10px #0d111f, -5px -5px 10px #272f57;
            color: #0d1226;
            padding: 12px 25px;
            margin: 8px;
            transition: all 0.3s ease;
            font-weight: 600;
        }
        .btn-secondary:hover {
            box-shadow: 3px 3px 6px #0d111f, -3px -3px 6px #272f57;
            transform: translateY(2px);
            background: linear-gradient(145deg, #2196f3, #64b5f6);
        }
        .btn-stop {
            background: linear-gradient(145deg, #f44336, #d32f2f);
            border: none;
            border-radius: 16px;
            box-shadow: 5px 5px 10px #0d111f, -5px -5px 10px #272f57;
            color: white;
            padding: 12px 25px;
            margin: 8px;
            transition: all 0.3s ease;
            font-weight: 600;
        }
        .btn-stop:hover {
            box-shadow: 3px 3px 6px #0d111f, -3px -3px 6px #272f57;
            transform: translateY(2px);
            background: linear-gradient(145deg, #d32f2f, #f44336);
        }
        .audio-input {
            border-radius: 16px;
            background: linear-gradient(145deg, #1e243b, #151a30);
            box-shadow: inset 5px 5px 10px #0d111f, inset -5px -5px 10px #272f57;
            padding: 15px;
            margin: 10px 0;
            border: 1px solid #ff7b25;
        }
        .text-input {
            border-radius: 16px;
            background: linear-gradient(145deg, #1e243b, #151a30);
            box-shadow: inset 5px 5px 10px #0d111f, inset -5px -5px 10px #272f57;
            padding: 15px;
            margin: 10px 0;
            border: 1px solid #64b5f6;
            color: #64b5f6;
        }
        .label {
            font-weight: 600;
            color: #ff7b25;
            margin-bottom: 8px;
            display: block;
        }
        .footer {
            text-align: center;
            margin-top: 25px;
            color: #64b5f6;
            font-size: 14px;
        }
        .status-indicator {
            text-align: center;
            margin: 10px 0;
            color: #ff7b25;
            font-style: italic;
        }
        /* Scrollbar styling */
        .conversation-history::-webkit-scrollbar {
            width: 8px;
        }
        .conversation-history::-webkit-scrollbar-track {
            background: #151a30;
            border-radius: 4px;
        }
        .conversation-history::-webkit-scrollbar-thumb {
            background: #ff7b25;
            border-radius: 4px;
        }
        .conversation-history::-webkit-scrollbar-thumb:hover {
            background: #e55a00;
        }
        """
    ) as interface:

        # Inject the Transformers.js loader/STT/TTS script (module-level
        # constant defined near the top of this file).
        gr.HTML(transformers_js_code)

        gr.HTML("""
        <div class="title">🍎 AppleCare Voice Concierge</div>
        <div class="subtitle">Your mystical AI-powered device repair assistant</div>
        """)

        with gr.Column(elem_classes="main-container"):
            # Status indicator; the injected JavaScript updates #status-text
            # as models load and speech is processed.
            gr.HTML("""
            <div class="status-indicator" id="status-text">
                Loading speech models...
            </div>
            """)

            # Microphone input. The recording itself is handled client-side
            # by the injected JavaScript rather than a Gradio event handler.
            with gr.Row():
                audio_input = gr.Audio(
                    sources=["microphone"],
                    type="filepath",
                    label="🎤 Click to speak - I'm listening",
                    elem_classes="audio-input"
                )

            # Hidden transcript textbox: browser JS writes the recognized
            # text here and fires input/change events to trigger the Python
            # handler wired below.
            hidden_transcript = gr.Textbox(visible=False, elem_id="hidden-transcript")

            # Typed input for testing without a microphone.
            with gr.Row():
                text_input = gr.Textbox(
                    placeholder="Or type your message here...",
                    label="💬 Text Input",
                    lines=2,
                    elem_classes="text-input"
                )

            # Action buttons. NOTE(review): speak_btn has no Gradio .click()
            # handler — it is driven purely by the JavaScript at the bottom,
            # which binds to the first '.btn-primary' element; confirm that
            # selector uniquely targets this button.
            with gr.Row():
                submit_text_btn = gr.Button("📝 Send Text", elem_classes="btn-secondary")
                reset_btn = gr.Button("🔄 Reset Conversation", elem_classes="btn-stop")
                speak_btn = gr.Button("🔊 Speak Response", elem_classes="btn-primary")

            # Concierge reply; #ai-response is also watched by the injected
            # MutationObserver for auto-speaking responses.
            ai_response = gr.Textbox(
                label="🤖 Concierge Response",
                value="Hello! I'm your AppleCare concierge. How can I help with your device today?",
                lines=8,
                interactive=False,
                elem_classes="response-box",
                elem_id="ai-response"
            )

            # Rolling transcript of the last turns.
            conversation_history = gr.Textbox(
                label="📝 Conversation History",
                lines=10,
                interactive=False,
                elem_classes="conversation-history"
            )

            gr.HTML("""
            <div class="footer">
                <p>This AI concierge uses Transformers.js for speech processing - no API keys required</p>
                <p>✅ Check device issues • 📍 Find nearby Apple Stores • 📅 Schedule appointments • 💰 Get repair estimates</p>
            </div>
            """)

        # Event handlers -----------------------------------------------------
        # Fired when the browser JS writes a transcript into the hidden box.
        hidden_transcript.change(
            fn=process_transcribed_text,
            inputs=[hidden_transcript],
            outputs=[ai_response, conversation_history]
        )

        submit_text_btn.click(
            fn=text_input_handler,
            inputs=[text_input],
            outputs=[ai_response, conversation_history]
        )

        # Enter key in the text box behaves like the Send button.
        text_input.submit(
            fn=text_input_handler,
            inputs=[text_input],
            outputs=[ai_response, conversation_history]
        )

        reset_btn.click(
            fn=reset_conversation,
            outputs=[ai_response, conversation_history]
        )

        # Client-side glue: forward recorded audio to the Transformers.js
        # pipeline and wire the speak button.
        # NOTE(review): querySelector('input[type="file"]') assumes Gradio's
        # microphone widget exposes a file input — verify against the Gradio
        # version in use.
        gr.HTML("""
        <script>
        document.addEventListener('DOMContentLoaded', function() {
            // Get the audio input element
            const audioInput = document.querySelector('input[type="file"]');
            if (audioInput) {
                audioInput.addEventListener('change', function(event) {
                    const file = event.target.files[0];
                    if (file) {
                        // Handle the audio recording
                        handleAudioRecording(file);
                    }
                });
            }

            // Add event listener for the speak button
            const speakButton = document.querySelector('.btn-primary');
            if (speakButton) {
                speakButton.addEventListener('click', function() {
                    const responseText = document.querySelector('#ai-response textarea').value;
                    if (responseText) {
                        speakText(responseText);
                    }
                });
            }
        });
        </script>
        """)

    return interface
756
-
757
# Script entry point: build the UI and serve it on all interfaces, port 7860.
if __name__ == "__main__":
    app = create_interface()
    app.launch(server_name="0.0.0.0", server_port=7860, share=False)