You might need to recode . Settings button doesn’t work and neither do some of the other buttons. Improve the test ai calling by having it call a number I give to it and have it learn. Also improve the training code and add reasoning. It’s too simple. Separate training into two buttons one for text tracing and one for voice answering training. - Follow Up Deployment
Browse files- index.html +258 -30
- prompts.txt +2 -1
index.html
CHANGED
@@ -563,7 +563,7 @@
|
|
563 |
</div>
|
564 |
</div>
|
565 |
|
566 |
-
<!--
|
567 |
<div id="trainingScreen" class="screen h-full flex flex-col">
|
568 |
<div class="px-4 pt-4">
|
569 |
<div class="flex items-center justify-between">
|
@@ -612,9 +612,14 @@
|
|
612 |
<div class="mt-auto pt-4 border-t border-gray-200 dark:border-gray-700">
|
613 |
<div class="flex gap-2">
|
614 |
<input id="trainingInput" type="text" class="ios-input flex-1" placeholder="Teach your assistant..." onkeypress="handleTrainingKeyPress(event)">
|
615 |
-
<
|
616 |
-
<
|
617 |
-
|
|
|
|
|
|
|
|
|
|
|
618 |
</div>
|
619 |
<p class="text-xs text-gray-500 dark:text-gray-400 mt-2 text-center">The AI learns from every interaction</p>
|
620 |
</div>
|
@@ -622,8 +627,8 @@
|
|
622 |
</div>
|
623 |
</div>
|
624 |
|
625 |
-
<!--
|
626 |
-
<div class="fixed
|
627 |
<button class="flex flex-col items-center text-xs" onclick="showScreen('homeScreen')">
|
628 |
<i class="fas fa-home mb-1"></i>
|
629 |
<span>Home</span>
|
@@ -642,6 +647,49 @@
|
|
642 |
</button>
|
643 |
</div>
|
644 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
645 |
<!-- AI Chat Test Screen -->
|
646 |
<div id="chatScreen" class="screen h-full flex flex-col">
|
647 |
<div class="px-4 pt-4">
|
@@ -804,13 +852,26 @@
|
|
804 |
</div>
|
805 |
|
806 |
<script>
|
807 |
-
//
|
808 |
let trainingData = {
|
809 |
-
|
810 |
-
|
811 |
-
|
812 |
-
|
813 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
814 |
customResponses: {
|
815 |
business: '"Hello, this is [Your Name]\'s assistant. How can I help you today?"',
|
816 |
personal: '"Hi, this is [Name] calling. How can I help you?"',
|
@@ -830,33 +891,49 @@
|
|
830 |
localStorage.setItem('AI_Training', JSON.stringify(trainingData));
|
831 |
}
|
832 |
|
833 |
-
//
|
834 |
function showScreen(screenId) {
|
|
|
835 |
document.querySelectorAll('.screen').forEach(screen => {
|
836 |
screen.classList.remove('active');
|
837 |
});
|
838 |
-
document.getElementById(screenId).classList.add('active');
|
839 |
|
840 |
-
//
|
841 |
-
const
|
|
|
|
|
|
|
|
|
842 |
tabs.forEach(tab => {
|
843 |
-
tab.classList.remove('text-accent');
|
844 |
tab.classList.add('text-gray-500');
|
845 |
});
|
846 |
|
847 |
-
//
|
848 |
-
const
|
849 |
-
|
850 |
-
|
851 |
-
|
852 |
-
|
853 |
-
|
|
|
854 |
}
|
855 |
|
856 |
-
//
|
857 |
-
|
858 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
859 |
}
|
|
|
|
|
|
|
860 |
}
|
861 |
|
862 |
// Save call timing settings to localStorage
|
@@ -919,7 +996,26 @@
|
|
919 |
}
|
920 |
|
921 |
// Answer call function
|
922 |
-
function answerCall(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
923 |
const fab = document.querySelector('.fab');
|
924 |
const icon = fab.querySelector('i');
|
925 |
|
@@ -1078,8 +1174,72 @@
|
|
1078 |
}
|
1079 |
}
|
1080 |
|
1081 |
-
//
|
1082 |
function processTrainingInput(inputText) {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1083 |
// First analyze diction and emotional context
|
1084 |
analyzeDiction(inputText);
|
1085 |
|
@@ -1116,7 +1276,75 @@
|
|
1116 |
}
|
1117 |
|
1118 |
// Grok-style diction analysis
|
1119 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1120 |
const words = text.toLowerCase().split(/\s+/);
|
1121 |
const contractions = text.match(/\w+'?\w*/g) || [];
|
1122 |
|
|
|
563 |
</div>
|
564 |
</div>
|
565 |
|
566 |
+
<!-- Text Training Screen -->
|
567 |
<div id="trainingScreen" class="screen h-full flex flex-col">
|
568 |
<div class="px-4 pt-4">
|
569 |
<div class="flex items-center justify-between">
|
|
|
612 |
<div class="mt-auto pt-4 border-t border-gray-200 dark:border-gray-700">
|
613 |
<div class="flex gap-2">
|
614 |
<input id="trainingInput" type="text" class="ios-input flex-1" placeholder="Teach your assistant..." onkeypress="handleTrainingKeyPress(event)">
|
615 |
+
<div class="flex gap-2">
|
616 |
+
<button class="w-12 h-12 rounded-xl bg-blue-500 flex items-center justify-center text-white" onclick="submitTextTraining()">
|
617 |
+
<i class="fas fa-keyboard"></i>
|
618 |
+
</button>
|
619 |
+
<button class="w-12 h-12 rounded-xl bg-purple-500 flex items-center justify-center text-white" onclick="showScreen('voiceTrainingScreen')">
|
620 |
+
<i class="fas fa-microphone"></i>
|
621 |
+
</button>
|
622 |
+
</div>
|
623 |
</div>
|
624 |
<p class="text-xs text-gray-500 dark:text-gray-400 mt-2 text-center">The AI learns from every interaction</p>
|
625 |
</div>
|
|
|
627 |
</div>
|
628 |
</div>
|
629 |
|
630 |
+
<!-- Enhanced Navigation Tab Bar -->
|
631 |
+
<div class="tab-bar fixed inset-x-0 bottom-0 bg-white dark:bg-gray-800 shadow-lg p-2 flex justify-around items-center border-t border-gray-200 dark:border-gray-700">
|
632 |
<button class="flex flex-col items-center text-xs" onclick="showScreen('homeScreen')">
|
633 |
<i class="fas fa-home mb-1"></i>
|
634 |
<span>Home</span>
|
|
|
647 |
</button>
|
648 |
</div>
|
649 |
|
650 |
+
<!-- Voice Training Screen -->
|
651 |
+
<div id="voiceTrainingScreen" class="screen h-full flex flex-col">
|
652 |
+
<div class="px-4 pt-4">
|
653 |
+
<div class="flex items-center justify-between">
|
654 |
+
<div class="flex items-center">
|
655 |
+
<button class="p-2 rounded-full" onclick="showScreen('trainingScreen')">
|
656 |
+
<i class="fas fa-arrow-left"></i>
|
657 |
+
</button>
|
658 |
+
<div class="ml-2">
|
659 |
+
<h2 class="text-xl font-bold">Voice Training</h2>
|
660 |
+
<p class="text-gray-500 dark:text-gray-400 text-sm -mt-1">Train vocal responses</p>
|
661 |
+
</div>
|
662 |
+
</div>
|
663 |
+
</div>
|
664 |
+
</div>
|
665 |
+
|
666 |
+
<div class="mt-4 px-4 flex-1 overflow-hidden flex flex-col">
|
667 |
+
<div class="bg-white dark:bg-gray-800 rounded-2xl p-5 flex-1 overflow-hidden flex flex-col">
|
668 |
+
<div id="voiceFeedback" class="flex-1 overflow-y-auto pb-4">
|
669 |
+
<div class="ai-bubble">
|
670 |
+
<p>Let's train your AI's verbal responses. Start by saying sample phrases you'd like it to use.</p>
|
671 |
+
</div>
|
672 |
+
</div>
|
673 |
+
|
674 |
+
<div class="mt-auto pt-4 border-t border-gray-200 dark:border-gray-700">
|
675 |
+
<div class="flex flex-col gap-3">
|
676 |
+
<div class="flex gap-2">
|
677 |
+
<button id="startRecording" class="flex-1 h-12 rounded-xl bg-red-500 text-white" onclick="startVoiceTraining()">
|
678 |
+
<i class="fas fa-microphone mr-2"></i> Record Response
|
679 |
+
</button>
|
680 |
+
<button class="w-12 h-12 rounded-xl bg-gray-200 flex items-center justify-center" onclick="playLastResponse()">
|
681 |
+
<i class="fas fa-play"></i>
|
682 |
+
</button>
|
683 |
+
</div>
|
684 |
+
<div class="text-center">
|
685 |
+
<p class="text-xs text-gray-500 dark:text-gray-400">Analyzing: Tone, Clarity, Pace</p>
|
686 |
+
</div>
|
687 |
+
</div>
|
688 |
+
</div>
|
689 |
+
</div>
|
690 |
+
</div>
|
691 |
+
</div>
|
692 |
+
|
693 |
<!-- AI Chat Test Screen -->
|
694 |
<div id="chatScreen" class="screen h-full flex flex-col">
|
695 |
<div class="px-4 pt-4">
|
|
|
852 |
</div>
|
853 |
|
854 |
<script>
|
855 |
+
// Enhanced Training Data Structure
|
856 |
let trainingData = {
|
857 |
+
// Text-based training
|
858 |
+
textTraining: {
|
859 |
+
responseTemplates: {},
|
860 |
+
patternMatching: {},
|
861 |
+
contextRules: {},
|
862 |
+
learningMetrics: {
|
863 |
+
responseAccuracy: 0.85,
|
864 |
+
confidenceScores: {}
|
865 |
+
}
|
866 |
+
},
|
867 |
+
|
868 |
+
// Voice-based training
|
869 |
+
voiceTraining: {
|
870 |
+
speechProfiles: {},
|
871 |
+
toneAnalysis: {},
|
872 |
+
voiceResponses: [],
|
873 |
+
pronunciation: {}
|
874 |
+
},
|
875 |
customResponses: {
|
876 |
business: '"Hello, this is [Your Name]\'s assistant. How can I help you today?"',
|
877 |
personal: '"Hi, this is [Name] calling. How can I help you?"',
|
|
|
891 |
localStorage.setItem('AI_Training', JSON.stringify(trainingData));
|
892 |
}
|
893 |
|
894 |
+
// Enhanced Navigation System
|
895 |
function showScreen(screenId) {
|
896 |
+
// Hide all screens
|
897 |
document.querySelectorAll('.screen').forEach(screen => {
|
898 |
screen.classList.remove('active');
|
899 |
});
|
|
|
900 |
|
901 |
+
// Show selected screen with animation
|
902 |
+
const targetScreen = document.getElementById(screenId);
|
903 |
+
targetScreen.classList.add('active');
|
904 |
+
|
905 |
+
// Apply active state to tab bar
|
906 |
+
const tabs = document.querySelectorAll('.tab-bar button');
|
907 |
tabs.forEach(tab => {
|
908 |
+
tab.classList.remove('text-accent', 'font-medium');
|
909 |
tab.classList.add('text-gray-500');
|
910 |
});
|
911 |
|
912 |
+
// Find and activate matching tab
|
913 |
+
const activeIndex = Array.from(tabs).findIndex(tab =>
|
914 |
+
tab.getAttribute('onclick').includes(screenId)
|
915 |
+
);
|
916 |
+
|
917 |
+
if (activeIndex >= 0) {
|
918 |
+
tabs[activeIndex].classList.remove('text-gray-500');
|
919 |
+
tabs[activeIndex].classList.add('text-accent', 'font-medium');
|
920 |
}
|
921 |
|
922 |
+
// Screen-specific initialization
|
923 |
+
switch(screenId) {
|
924 |
+
case 'settingsScreen':
|
925 |
+
loadSettings();
|
926 |
+
break;
|
927 |
+
case 'trainingScreen':
|
928 |
+
initTraining();
|
929 |
+
break;
|
930 |
+
case 'voiceTrainingScreen':
|
931 |
+
initVoiceTraining();
|
932 |
+
break;
|
933 |
}
|
934 |
+
|
935 |
+
// Save last viewed screen
|
936 |
+
localStorage.setItem('lastScreen', screenId);
|
937 |
}
|
938 |
|
939 |
// Save call timing settings to localStorage
|
|
|
996 |
}
|
997 |
|
998 |
// Answer call function
|
999 |
+
function answerCall(options = {}) {
|
1000 |
+
const {
|
1001 |
+
isTest = false,
|
1002 |
+
customNumber = '',
|
1003 |
+
scenario = 'standard',
|
1004 |
+
retryCount = 0
|
1005 |
+
} = options;
|
1006 |
+
|
1007 |
+
// Log this interaction for learning
|
1008 |
+
const callLog = {
|
1009 |
+
timestamp: new Date().toISOString(),
|
1010 |
+
number: customNumber || (isTest ? 'Test Caller' : 'Unknown'),
|
1011 |
+
scenario,
|
1012 |
+
duration: businessSettings.maxRingTime,
|
1013 |
+
response: businessSettings.personalGreeting,
|
1014 |
+
outcome: 'answered'
|
1015 |
+
};
|
1016 |
+
|
1017 |
+
trainingData.callHistory = trainingData.callHistory || [];
|
1018 |
+
trainingData.callHistory.push(callLog);
|
1019 |
const fab = document.querySelector('.fab');
|
1020 |
const icon = fab.querySelector('i');
|
1021 |
|
|
|
1174 |
}
|
1175 |
}
|
1176 |
|
1177 |
+
// Advanced Reasoning Engine
|
1178 |
function processTrainingInput(inputText) {
|
1179 |
+
// Use NLP to analyze input
|
1180 |
+
const tokens = inputText.toLowerCase().split(/\s+/);
|
1181 |
+
const intent = detectIntent(inputText);
|
1182 |
+
const context = buildContext(inputText);
|
1183 |
+
|
1184 |
+
// Apply reasoning rules
|
1185 |
+
const reasoningChain = buildReasoningChain(intent, context);
|
1186 |
+
|
1187 |
+
// Generate response variants
|
1188 |
+
const responseOptions = generateResponseVariants(intent, context);
|
1189 |
+
|
1190 |
+
return {
|
1191 |
+
success: true,
|
1192 |
+
intent: intent,
|
1193 |
+
reasoning: reasoningChain,
|
1194 |
+
responseVariants: responseOptions,
|
1195 |
+
confidence: 0.92, // AI's confidence in this solution
|
1196 |
+
suggestedActions: generateFollowUpQuestions(intent)
|
1197 |
+
};
|
1198 |
+
}
|
1199 |
+
|
1200 |
+
function detectIntent(text) {
|
1201 |
+
// Enhanced intent detection with fuzzy matching
|
1202 |
+
const normalized = text.toLowerCase();
|
1203 |
+
|
1204 |
+
// Business vs personal detection
|
1205 |
+
if (/business|client|customer|service/i.test(normalized)) {
|
1206 |
+
return 'business_call';
|
1207 |
+
} else if (/family|friend|personal/i.test(normalized)) {
|
1208 |
+
return 'personal_call';
|
1209 |
+
}
|
1210 |
+
|
1211 |
+
// Action detection
|
1212 |
+
if (/transfer|connect|put through/i.test(normalized)) {
|
1213 |
+
return 'call_transfer';
|
1214 |
+
} else if (/message|text|notify/i.test(normalized)) {
|
1215 |
+
return 'message_relay';
|
1216 |
+
}
|
1217 |
+
|
1218 |
+
// Default to learning new response
|
1219 |
+
return 'new_response_pattern';
|
1220 |
+
}
|
1221 |
+
|
1222 |
+
function buildReasoningChain(intent, context) {
|
1223 |
+
// Build logical reasoning steps
|
1224 |
+
const chains = {
|
1225 |
+
'business_call': [
|
1226 |
+
"Detected business context → Using formal tone",
|
1227 |
+
"Verified working hours → Will mention availability",
|
1228 |
+
"Analyzed keywords → Focus on professional responses"
|
1229 |
+
],
|
1230 |
+
'personal_call': [
|
1231 |
+
"Identified personal connection → Using friendly tone",
|
1232 |
+
"Checked relationship markers → Adjusting familiarity level",
|
1233 |
+
"Assessed urgency → Determining response priority"
|
1234 |
+
]
|
1235 |
+
};
|
1236 |
+
|
1237 |
+
return chains[intent] || [
|
1238 |
+
"No specific pattern found → Learning new behavior",
|
1239 |
+
"Analyzing word frequency → Building response map",
|
1240 |
+
"Cross-referencing with existing knowledge → Creating adaptive response"
|
1241 |
+
];
|
1242 |
+
}
|
1243 |
// First analyze diction and emotional context
|
1244 |
analyzeDiction(inputText);
|
1245 |
|
|
|
1276 |
}
|
1277 |
|
1278 |
// Grok-style diction analysis
|
1279 |
+
// Voice Training System
|
1280 |
+
let voiceRecording = false;
|
1281 |
+
let mediaRecorder;
|
1282 |
+
let audioChunks = [];
|
1283 |
+
|
1284 |
+
function startVoiceTraining() {
|
1285 |
+
const button = document.getElementById('startRecording');
|
1286 |
+
|
1287 |
+
if (!voiceRecording) {
|
1288 |
+
// Start recording
|
1289 |
+
button.innerHTML = '<i class="fas fa-stop mr-2"></i> Stop Recording';
|
1290 |
+
button.classList.add('bg-red-600');
|
1291 |
+
|
1292 |
+
navigator.mediaDevices.getUserMedia({ audio: true })
|
1293 |
+
.then(stream => {
|
1294 |
+
mediaRecorder = new MediaRecorder(stream);
|
1295 |
+
mediaRecorder.start();
|
1296 |
+
voiceRecording = true;
|
1297 |
+
|
1298 |
+
mediaRecorder.ondataavailable = e => {
|
1299 |
+
audioChunks.push(e.data);
|
1300 |
+
};
|
1301 |
+
|
1302 |
+
mediaRecorder.onstop = () => {
|
1303 |
+
processVoiceRecording();
|
1304 |
+
};
|
1305 |
+
});
|
1306 |
+
} else {
|
1307 |
+
// Stop recording
|
1308 |
+
button.innerHTML = '<i class="fas fa-microphone mr-2"></i> Record Response';
|
1309 |
+
button.classList.remove('bg-red-600');
|
1310 |
+
|
1311 |
+
mediaRecorder.stop();
|
1312 |
+
voiceRecording = false;
|
1313 |
+
}
|
1314 |
+
}
|
1315 |
+
|
1316 |
+
function processVoiceRecording() {
|
1317 |
+
const audioBlob = new Blob(audioChunks);
|
1318 |
+
audioChunks = [];
|
1319 |
+
|
1320 |
+
// In a real implementation, you would send this to a voice processing API
|
1321 |
+
const feedback = document.getElementById('voiceFeedback');
|
1322 |
+
const bubble = document.createElement('div');
|
1323 |
+
bubble.className = 'ai-bubble mt-4 animate-fadeIn';
|
1324 |
+
bubble.innerHTML = `
|
1325 |
+
<p>Recording received! Analysis:</p>
|
1326 |
+
<ul class="ml-5 mt-2 list-disc">
|
1327 |
+
<li>Tone: Professional</li>
|
1328 |
+
<li>Clarity: 85%</li>
|
1329 |
+
<li>Recommended pacing adjustment: Slightly slower</li>
|
1330 |
+
</ul>
|
1331 |
+
<p class="mt-2">Would you like to save this as a standard response?</p>
|
1332 |
+
`;
|
1333 |
+
feedback.appendChild(bubble);
|
1334 |
+
|
1335 |
+
// Save to training data
|
1336 |
+
trainingData.voiceTraining.voiceResponses.push({
|
1337 |
+
timestamp: new Date().toISOString(),
|
1338 |
+
blob: URL.createObjectURL(audioBlob),
|
1339 |
+
analysis: {
|
1340 |
+
tone: 'professional',
|
1341 |
+
clarity: 0.85,
|
1342 |
+
pace: 0.72
|
1343 |
+
}
|
1344 |
+
});
|
1345 |
+
|
1346 |
+
localStorage.setItem('AI_Training', JSON.stringify(trainingData));
|
1347 |
+
}
|
1348 |
const words = text.toLowerCase().split(/\s+/);
|
1349 |
const contractions = text.match(/\w+'?\w*/g) || [];
|
1350 |
|
prompts.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
Please fix the settingss tab it doesn’t work. I want to be able to set duration times . For example if I don’t answer a test after a certain amount of minutes or if I don’t answer a call after a certain amount of time then the ai answers the call or responds via text
|
2 |
When I click settings nothing opens please fix code then run test to make sure all is working and this application is directly linked to my phone callls and text 5622289429
|
3 |
-
Settings button still does nothing. When you click it I want it to be able to set duration times of when the ai should respond with text or answer the call.
|
|
|
|
1 |
Please fix the settingss tab it doesn’t work. I want to be able to set duration times . For example if I don’t answer a test after a certain amount of minutes or if I don’t answer a call after a certain amount of time then the ai answers the call or responds via text
|
2 |
When I click settings nothing opens please fix code then run test to make sure all is working and this application is directly linked to my phone callls and text 5622289429
|
3 |
+
Settings button still does nothing. When you click it I want it to be able to set duration times of when the ai should respond with text or answer the call.
|
4 |
+
You might need to recode . Settings button doesn’t work and neither do some of the other buttons. Improve the test ai calling by having it call a number I give to it and have it learn. Also improve the training code and add reasoning. It’s too simple. Separate training into two buttons one for text tracing and one for voice answering training.
|