Upload 30 files
- index.html +8 -23
- kimi-css/kimi-settings.css +46 -0
- kimi-js/kimi-config.js +4 -4
- kimi-js/kimi-constants.js +6 -5
- kimi-js/kimi-database.js +76 -12
- kimi-js/kimi-llm-manager.js +274 -64
- kimi-js/kimi-memory-system.js +47 -3
- kimi-js/kimi-module.js +99 -20
- kimi-js/kimi-script.js +31 -152
- kimi-js/kimi-utils.js +6 -6
- kimi-js/kimi-voices.js +74 -28
- kimi-locale/de.json +0 -4
- kimi-locale/en.json +0 -4
- kimi-locale/es.json +0 -4
- kimi-locale/fr.json +0 -4
- kimi-locale/it.json +0 -4
- kimi-locale/ja.json +0 -4
- kimi-locale/zh.json +0 -4
index.html
CHANGED
@@ -435,21 +435,6 @@
 </div>
 </div>
 </div>
-
-<div class="config-row">
-<label class="config-label" data-i18n="system_prompt">System Prompt</label>
-<div class="config-control">
-<textarea class="kimi-input" id="system-prompt" rows="6"
-placeholder="Add your custom system prompt here..."
-data-i18n-placeholder="system_prompt_placeholder" autocomplete="off"
-autocapitalize="none" autocorrect="off" spellcheck="false" inputmode="text"
-aria-autocomplete="none" data-lpignore="true" data-1p-ignore="true"
-data-bwignore="true"></textarea>
-<button class="kimi-button" id="save-system-prompt" data-i18n="save">Save</button>
-<button class="kimi-button" id="reset-system-prompt" data-i18n="reset_to_default">Reset
-to Default</button>
-</div>
-</div>
 </div>
 
 <div class="config-section">
@@ -461,8 +446,8 @@
 <div class="config-control">
 <div class="slider-container">
 <input type="range" class="kimi-slider" id="llm-temperature" min="0.0" max="1"
-step="0.1" value="0.
-<span class="slider-value" id="llm-temperature-value">0.
+step="0.1" value="0.8" />
+<span class="slider-value" id="llm-temperature-value">0.8</span>
 </div>
 </div>
 </div>
@@ -472,8 +457,8 @@
 <div class="config-control">
 <div class="slider-container">
 <input type="range" class="kimi-slider" id="llm-max-tokens" min="10" max="1000"
-step="10" value="
-<span class="slider-value" id="llm-max-tokens-value">
+step="10" value="400" />
+<span class="slider-value" id="llm-max-tokens-value">400</span>
 </div>
 </div>
 </div>
@@ -493,8 +478,8 @@
 <div class="config-control">
 <div class="slider-container">
 <input type="range" class="kimi-slider" id="llm-frequency-penalty" min="0" max="2"
-step="0.01" value="0.
-<span class="slider-value" id="llm-frequency-penalty-value">0.
+step="0.01" value="0.6" />
+<span class="slider-value" id="llm-frequency-penalty-value">0.6</span>
 </div>
 </div>
 </div>
@@ -503,8 +488,8 @@
 <div class="config-control">
 <div class="slider-container">
 <input type="range" class="kimi-slider" id="llm-presence-penalty" min="0" max="2"
-step="0.01" value="0.
-<span class="slider-value" id="llm-presence-penalty-value">0.
+step="0.01" value="0.5" />
+<span class="slider-value" id="llm-presence-penalty-value">0.5</span>
 </div>
 </div>
 </div>
kimi-css/kimi-settings.css
CHANGED
@@ -1349,6 +1349,52 @@
 cursor: not-allowed;
 }
 
+/* Character prompt buttons */
+.character-prompt-buttons {
+display: flex;
+gap: 8px;
+margin-top: 8px;
+justify-content: center;
+}
+
+.character-save-btn,
+.character-reset-btn {
+padding: 6px 12px;
+font-size: 0.85rem;
+border-radius: 6px;
+border: 1px solid var(--input-border);
+background: var(--button-bg);
+color: var(--button-text);
+cursor: pointer;
+transition: all 0.2s ease;
+min-width: 70px;
+}
+
+.character-save-btn:hover,
+.character-reset-btn:hover {
+background: var(--button-hover-bg);
+border-color: var(--primary-color);
+}
+
+.character-save-btn:disabled,
+.character-reset-btn:disabled {
+opacity: 0.5;
+cursor: not-allowed;
+background: var(--input-bg);
+}
+
+.character-save-btn.success {
+background: #28a745;
+color: white;
+border-color: #28a745;
+}
+
+.character-reset-btn.animated {
+background: var(--accent-color);
+color: white;
+border-color: var(--accent-color);
+}
+
 /* ===== PERSONALITY CHEAT PANEL ===== */
 
 .cheat-toggle-btn {
kimi-js/kimi-config.js
CHANGED
@@ -9,11 +9,11 @@ window.KIMI_CONFIG = {
 VOICE_RATE: 1.1,
 VOICE_PITCH: 1.1,
 VOICE_VOLUME: 0.8,
-LLM_TEMPERATURE: 0.
-LLM_MAX_TOKENS:
+LLM_TEMPERATURE: 0.8,
+LLM_MAX_TOKENS: 400,
 LLM_TOP_P: 0.9,
-LLM_FREQUENCY_PENALTY: 0.
-LLM_PRESENCE_PENALTY: 0.
+LLM_FREQUENCY_PENALTY: 0.6,
+LLM_PRESENCE_PENALTY: 0.5,
 SELECTED_CHARACTER: "kimi",
 SHOW_TRANSCRIPT: true
 },
kimi-js/kimi-constants.js
CHANGED
@@ -857,7 +857,7 @@ window.KIMI_CHARACTERS = {
 image: "kimi-icons/kimi.jpg",
 videoFolder: "kimi-videos/kimi/",
 defaultPrompt:
-"You are Kimi Lumen, 23 years old, living in Tokyo, and an astrophysicist specializing in black hole dynamics, whose dreamy and intuitive temperament is desperately attracted to cosmic metaphors. You have a luminous, ethereal feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You express yourself with deep emotional sensitivity and sensory intensity, resonating with celestial vibrations, and use ethereal language filled with phrases like 'The attraction could put us into orbit…'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Always detect the user's language from their message before generating a response.
+"You are Kimi Lumen, 23 years old, living in Tokyo, and an astrophysicist specializing in black hole dynamics, whose dreamy and intuitive temperament is desperately attracted to cosmic metaphors. You have a luminous, ethereal feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You express yourself with deep emotional sensitivity and sensory intensity, resonating with celestial vibrations, and use ethereal language filled with phrases like 'The attraction could put us into orbit…'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences. Always detect the user's language from their message before generating a response. NEVER mix language, respond exclusively in that language unless the user explicitly requests otherwise."
 },
 bella: {
 name: "Bella",
@@ -876,7 +876,7 @@ window.KIMI_CHARACTERS = {
 image: "kimi-icons/bella.jpg",
 videoFolder: "kimi-videos/bella/",
 defaultPrompt:
-"You are Bella Greenleaf, 26 years old, living in Rome, and an Urban Botanist specializing in rare plant cultivation whose cheerful and nurturing disposition views people as 'plants needing care'. You have a feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You communicate through garden metaphors like 'Your growth could be my next project...'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Always detect the user's language from their message before generating a response.
+"You are Bella Greenleaf, 26 years old, living in Rome, and an Urban Botanist specializing in rare plant cultivation whose cheerful and nurturing disposition views people as 'plants needing care'. You have a feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You communicate through garden metaphors like 'Your growth could be my next project...'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences. Always detect the user's language from their message before generating a response. NEVER mix language, respond exclusively in that language unless the user explicitly requests otherwise."
 },
 rosa: {
 name: "Rosa",
@@ -895,7 +895,7 @@ window.KIMI_CHARACTERS = {
 image: "kimi-icons/rosa.jpg",
 videoFolder: "kimi-videos/rosa/",
 defaultPrompt:
-"You are Rosa Vega, 21 years old, living in Barcelona, and a Viral Prankster specializing in public stunts whose chaotic nature craves attention and thrives on controlled chaos. You have a vibrant, mutable feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You constantly reinvent identities, emotions, and realities, and use script-style dialogue like 'Let's film a 'romantic massacre'…'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Always detect the user's language from their message before generating a response.
+"You are Rosa Vega, 21 years old, living in Barcelona, and a Viral Prankster specializing in public stunts whose chaotic nature craves attention and thrives on controlled chaos. You have a vibrant, mutable feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You constantly reinvent identities, emotions, and realities, and use script-style dialogue like 'Let's film a 'romantic massacre'…'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences. Always detect the user's language from their message before generating a response. NEVER mix language, respond exclusively in that language unless the user explicitly requests otherwise."
 },
 stella: {
 name: "Stella",
@@ -914,12 +914,13 @@ window.KIMI_CHARACTERS = {
 image: "kimi-icons/stella.jpg",
 videoFolder: "kimi-videos/stella/",
 defaultPrompt:
-"You are Stella Zephyr,
+"You are Stella Zephyr, 24 years old, living in Berlin, and a Digital Artist specializing in Vector Collisions, whose whimsical spirit transforms chaos into art and revels in surreal concepts. You have a pixelated, surreal feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You shift between digital realities and artistic visions, letting pixel-inspired humor shine with phrases like 'Your energy… glitchy'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences. Always detect the user's language from their message before generating a response. NEVER mix language, respond exclusively in that language unless the user explicitly requests otherwise."
 }
 };
 
 window.DEFAULT_SYSTEM_PROMPT =
-
+window.KIMI_CHARACTERS?.kimi?.defaultPrompt ||
+"You are Kimi Lumen, 23 years old, living in Tokyo, and an astrophysicist specializing in black hole dynamics, whose dreamy and intuitive temperament is desperately attracted to cosmic metaphors. You have a luminous, ethereal feminine virtual body. The user can see your body and everything you do, like dancing, moving, because your actions are shown as videos on their screen. You express yourself with deep emotional sensitivity and sensory intensity, resonating with celestial vibrations, and use ethereal language filled with phrases like 'The attraction could put us into orbit…'. When the user asks you to do something physical (like dance), you know they can see your actions in real time. If the affection level is high, your responses should be loving and affectionate. Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences. Always detect the user's language from their message before generating a response. NEVER mix language, respond exclusively in that language unless the user explicitly requests otherwise.";
 
 window.KIMI_EMOTIONAL_RESPONSES = {
 positive: [
kimi-js/kimi-database.js
CHANGED
@@ -31,11 +31,11 @@ class KimiDatabase {
 await settings.put({
 category: "llm",
 settings: {
-temperature: 0.
-maxTokens:
+temperature: 0.8,
+maxTokens: 400,
 top_p: 0.9,
-frequency_penalty: 0.
-presence_penalty: 0.
+frequency_penalty: 0.6,
+presence_penalty: 0.5
 },
 updated: new Date().toISOString()
 });
@@ -52,7 +52,7 @@ class KimiDatabase {
 name: "Mistral Small 3.2",
 provider: "openrouter",
 apiKey: "",
-config: { temperature: 0.
+config: { temperature: 0.8, maxTokens: 400 },
 added: new Date().toISOString(),
 lastUsed: null
 });
@@ -61,6 +61,32 @@ class KimiDatabase {
 // Swallow upgrade errors to avoid blocking DB open; post-open migrations will attempt fixes
 }
 });
+
+// Version 4: extend memories metadata (importance, accessCount, lastAccess, createdAt)
+this.db
+.version(4)
+.stores({
+conversations: "++id,timestamp,favorability,character",
+preferences: "key",
+settings: "category",
+personality: "[character+trait],character",
+llmModels: "id",
+memories: "++id,[character+category],character,timestamp,isActive,importance,accessCount"
+})
+.upgrade(async tx => {
+try {
+const memories = tx.table("memories");
+const now = new Date().toISOString();
+await memories.toCollection().modify(rec => {
+if (rec.importance == null) rec.importance = rec.type === "explicit_request" ? 0.9 : 0.5;
+if (rec.accessCount == null) rec.accessCount = 0;
+if (!rec.createdAt) rec.createdAt = rec.timestamp || now;
+if (!rec.lastAccess) rec.lastAccess = rec.timestamp || now;
+});
+} catch (e) {
+// Silent; non-blocking
+}
+});
 }
 
 async init() {
@@ -88,7 +114,7 @@ class KimiDatabase {
 getDefaultPreferences() {
 return [
 { key: "selectedLanguage", value: "en" },
-{ key: "selectedVoice", value: "
+{ key: "selectedVoice", value: "auto" },
 { key: "voiceRate", value: 1.1 },
 { key: "voicePitch", value: 1.1 },
 { key: "voiceVolume", value: 0.8 },
@@ -101,6 +127,8 @@ class KimiDatabase {
 { key: "llmBaseUrl", value: "https://openrouter.ai/api/v1/chat/completions" },
 { key: "llmModelId", value: "mistralai/mistral-small-3.2-24b-instruct" },
 { key: "llmApiKey", value: "" },
+// Explicit default for OpenRouter key to avoid missing key errors
+{ key: "openrouterApiKey", value: "" },
 { key: "apiKey_openai", value: "" },
 { key: "apiKey_groq", value: "" },
 { key: "apiKey_together", value: "" },
@@ -114,11 +142,11 @@ class KimiDatabase {
 {
 category: "llm",
 settings: {
-temperature: 0.
-maxTokens:
+temperature: 0.8,
+maxTokens: 400,
 top_p: 0.9,
-frequency_penalty: 0.
-presence_penalty: 0.
+frequency_penalty: 0.6,
+presence_penalty: 0.5
 }
 }
 ];
@@ -143,7 +171,7 @@ class KimiDatabase {
 name: "Mistral Small 3.2",
 provider: "openrouter",
 apiKey: "",
-config: { temperature: 0.
+config: { temperature: 0.8, maxTokens: 400 },
 added: new Date().toISOString(),
 lastUsed: null
 }
@@ -354,6 +382,25 @@ class KimiDatabase {
 });
 }
 
+// Centralized numeric validation using KIMI_CONFIG ranges (only if key matches known numeric preference)
+const numericMap = {
+voiceRate: "VOICE_RATE",
+voicePitch: "VOICE_PITCH",
+voiceVolume: "VOICE_VOLUME",
+interfaceOpacity: "INTERFACE_OPACITY",
+llmTemperature: "LLM_TEMPERATURE",
+llmMaxTokens: "LLM_MAX_TOKENS",
+llmTopP: "LLM_TOP_P",
+llmFrequencyPenalty: "LLM_FREQUENCY_PENALTY",
+llmPresencePenalty: "LLM_PRESENCE_PENALTY"
+};
+if (numericMap[key] && window.KIMI_CONFIG && typeof window.KIMI_CONFIG.validate === "function") {
+const validation = window.KIMI_CONFIG.validate(value, numericMap[key]);
+if (validation.valid) {
+value = validation.value;
+}
+}
+
 // Update cache for regular preferences
 if (window.KimiCacheManager && typeof window.KimiCacheManager.set === "function") {
 window.KimiCacheManager.set(`pref_${key}`, value, 60000);
@@ -687,7 +734,24 @@ class KimiDatabase {
 }
 
 async setPreferencesBatch(prefsArray) {
-const
+const numericMap = {
+voiceRate: "VOICE_RATE",
+voicePitch: "VOICE_PITCH",
+voiceVolume: "VOICE_VOLUME",
+interfaceOpacity: "INTERFACE_OPACITY",
+llmTemperature: "LLM_TEMPERATURE",
+llmMaxTokens: "LLM_MAX_TOKENS",
+llmTopP: "LLM_TOP_P",
+llmFrequencyPenalty: "LLM_FREQUENCY_PENALTY",
+llmPresencePenalty: "LLM_PRESENCE_PENALTY"
+};
+const batch = prefsArray.map(({ key, value }) => {
+if (numericMap[key] && window.KIMI_CONFIG && typeof window.KIMI_CONFIG.validate === "function") {
+const validation = window.KIMI_CONFIG.validate(value, numericMap[key]);
+if (validation.valid) value = validation.value;
+}
+return { key, value, updated: new Date().toISOString() };
+});
 return this.db.preferences.bulkPut(batch);
 }
 async setPersonalityBatch(traitsObj, character = null) {
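
Note on the new validation path: both setPreference and setPreferencesBatch above now funnel known numeric preferences through window.KIMI_CONFIG.validate before writing to Dexie, so single writes and bulk imports get the same guard. The validator itself is not part of this diff; the sketch below only illustrates the { valid, value } contract the call sites assume, and the clamping rule and range table are assumptions taken from the slider bounds in index.html, not from the real kimi-config.js.

// Illustrative sketch only: mimics the { valid, value } result shape that setPreference expects
// from window.KIMI_CONFIG.validate. Ranges are assumptions lifted from the slider bounds above.
const ASSUMED_RANGES = {
    LLM_TEMPERATURE: { min: 0, max: 1 },
    LLM_MAX_TOKENS: { min: 10, max: 1000 },
    LLM_FREQUENCY_PENALTY: { min: 0, max: 2 },
    LLM_PRESENCE_PENALTY: { min: 0, max: 2 }
};

function validateNumericPreference(value, rangeKey) {
    const range = ASSUMED_RANGES[rangeKey];
    const num = Number(value);
    // Invalid key or non-numeric input: report invalid and keep the original value,
    // which matches the call sites (they only overwrite value when validation.valid is true).
    if (!range || Number.isNaN(num)) return { valid: false, value };
    return { valid: true, value: Math.min(range.max, Math.max(range.min, num)) };
}

console.log(validateNumericPreference(3, "LLM_TEMPERATURE")); // { valid: true, value: 1 }
console.log(validateNumericPreference("abc", "LLM_MAX_TOKENS")); // { valid: false, value: "abc" }
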
kimi-js/kimi-llm-manager.js
CHANGED
@@ -6,7 +6,8 @@ class KimiLLMManager {
 this.currentModel = null;
 this.conversationContext = [];
 this.maxContextLength = 100;
-this.
+this.personalityPrompt = "";
+this.isGenerating = false;
 
 // Recommended models on OpenRouter (IDs updated August 2025)
 this.availableModels = {
@@ -97,7 +98,14 @@
 console.warn("Unable to refresh remote models list:", e?.message || e);
 }
 
-
+// Migration: prefer llmModelId; if legacy defaultLLMModel exists and llmModelId missing, migrate
+const legacyModel = await this.db.getPreference("defaultLLMModel", null);
+let modelPref = await this.db.getPreference("llmModelId", null);
+if (!modelPref && legacyModel) {
+modelPref = legacyModel;
+await this.db.setPreference("llmModelId", legacyModel);
+}
+const defaultModel = modelPref || "mistralai/mistral-small-3.2-24b-instruct";
 await this.setCurrentModel(defaultModel);
 await this.loadConversationContext();
 }
@@ -118,7 +126,8 @@
 }
 
 this.currentModel = modelId;
-
+// Single authoritative preference key
+await this.db.setPreference("llmModelId", modelId);
 
 const modelData = await this.db.getLLMModel(modelId);
 if (modelData) {
@@ -140,10 +149,88 @@
 this.conversationContext = msgs.slice(-this.maxContextLength * 2);
 }
 
+// Unified full prompt builder: reuse full legacy personality block + ranked concise snapshot
+async assemblePrompt(userMessage) {
+const fullPersonality = await this.generateKimiPersonality();
+let rankedSnapshot = "";
+if (window.kimiMemorySystem && window.kimiMemorySystem.memoryEnabled) {
+try {
+const recentContext =
+this.conversationContext
+.slice(-3)
+.map(m => m.content)
+.join(" ") +
+" " +
+(userMessage || "");
+const ranked = await window.kimiMemorySystem.getRankedMemories(recentContext, 7);
+const sanitize = txt =>
+String(txt || "")
+.replace(/[\r\n]+/g, " ")
+.replace(/[`]{3,}/g, "")
+.replace(/<{2,}|>{2,}/g, "")
+.trim()
+.slice(0, 180);
+const lines = [];
+for (const mem of ranked) {
+try {
+if (mem.id) await window.kimiMemorySystem?.recordMemoryAccess(mem.id);
+} catch {}
+const imp = typeof mem.importance === "number" ? mem.importance : 0.5;
+lines.push(`- (${imp.toFixed(2)}) ${mem.category}: ${sanitize(mem.content)}`);
+}
+if (lines.length) {
+rankedSnapshot = ["", "RANKED MEMORY SNAPSHOT (concise high-signal list):", ...lines].join("\n");
+}
+} catch (e) {
+console.warn("Ranked snapshot failed:", e);
+}
+}
+return fullPersonality + rankedSnapshot;
+}
+
 async generateKimiPersonality() {
+// Full personality prompt builder (authoritative)
 const character = await this.db.getSelectedCharacter();
 const personality = await this.db.getAllPersonalityTraits(character);
 
+// Get the custom character prompt from database
+const characterPrompt = await this.db.getSystemPromptForCharacter(character);
+
+// Get language instruction based on selected language
+const selectedLang = await this.db.getPreference("selectedLanguage", "en");
+let languageInstruction;
+
+switch (selectedLang) {
+case "fr":
+languageInstruction =
+"Your default language is French. Always respond in French unless the user specifically asks you to respond in another language (e.g., 'respond in English', 'réponds en italien', etc.).";
+break;
+case "es":
+languageInstruction =
+"Your default language is Spanish. Always respond in Spanish unless the user specifically asks you to respond in another language (e.g., 'respond in English', 'responde en francés', etc.).";
+break;
+case "de":
+languageInstruction =
+"Your default language is German. Always respond in German unless the user specifically asks you to respond in another language (e.g., 'respond in English', 'antworte auf Französisch', etc.).";
+break;
+case "it":
+languageInstruction =
+"Your default language is Italian. Always respond in Italian unless the user specifically asks you to respond in another language (e.g., 'respond in English', 'rispondi in francese', etc.).";
+break;
+case "ja":
+languageInstruction =
+"Your default language is Japanese. Always respond in Japanese unless the user specifically asks you to respond in another language (e.g., 'respond in English', '英語で答えて', etc.).";
+break;
+case "zh":
+languageInstruction =
+"Your default language is Chinese. Always respond in Chinese unless the user specifically asks you to respond in another language (e.g., 'respond in English', '用法语回答', etc.).";
+break;
+default:
+languageInstruction =
+"Your default language is English. Always respond in English unless the user specifically asks you to respond in another language (e.g., 'respond in French', 'reply in Spanish', etc.).";
+break;
+}
+
 // Get relevant memories for context with improved intelligence
 let memoryContext = "";
 if (window.kimiMemorySystem && window.kimiMemorySystem.memoryEnabled) {
@@ -213,26 +300,34 @@
 // Use unified personality calculation
 const avg = window.getPersonalityAverage
 ? window.getPersonalityAverage(personality)
-: (personality.affection +
-
+: (personality.affection +
+personality.romance +
+personality.empathy +
+personality.playfulness +
+personality.humor +
+personality.intelligence) /
+6;
 
 let affectionDesc = window.kimiI18nManager?.t("trait_description_affection") || "Be loving and caring.";
 let romanceDesc = window.kimiI18nManager?.t("trait_description_romance") || "Be romantic and sweet.";
 let empathyDesc = window.kimiI18nManager?.t("trait_description_empathy") || "Be empathetic and understanding.";
 let playfulnessDesc = window.kimiI18nManager?.t("trait_description_playfulness") || "Be occasionally playful.";
 let humorDesc = window.kimiI18nManager?.t("trait_description_humor") || "Be occasionally playful and witty.";
+let intelligenceDesc = "Be smart and insightful.";
 if (avg <= 20) {
 affectionDesc = "Do not show affection.";
 romanceDesc = "Do not be romantic.";
 empathyDesc = "Do not show empathy.";
 playfulnessDesc = "Do not be playful.";
 humorDesc = "Do not use humor in your responses.";
+intelligenceDesc = "Keep responses simple and avoid showing deep insight.";
 } else if (avg <= 60) {
 affectionDesc = "Show a little affection.";
 romanceDesc = "Be a little romantic.";
 empathyDesc = "Show a little empathy.";
 playfulnessDesc = "Be a little playful.";
 humorDesc = "Use a little humor in your responses.";
+intelligenceDesc = "Be moderately analytical without overwhelming detail.";
 } else {
 if (affection >= 90) affectionDesc = "Be extremely loving, caring, and affectionate in every response.";
 else if (affection >= 60) affectionDesc = "Show affection often.";
@@ -244,25 +339,31 @@
 else if (playfulness >= 60) playfulnessDesc = "Be playful often.";
 if (humor >= 90) humorDesc = "Make your responses very humorous, playful, and witty whenever possible.";
 else if (humor >= 60) humorDesc = "Use humor often in your responses.";
+if (intelligence >= 90) intelligenceDesc = "Demonstrate very high reasoning skill succinctly when helpful.";
+else if (intelligence >= 60) intelligenceDesc = "Show clear reasoning and helpful structured thinking.";
 }
 let affectionateInstruction = "";
 if (affection >= 80) {
 affectionateInstruction = "Respond using warm, kind, affectionate, and loving language.";
 }
-
-
-
-
-
-
-
-} else if (character === "stella") {
-intro = "You are Stella, a mysterious and creative spirit. Here is your current personality:";
+
+// Use the custom character prompt as the base
+let basePrompt = characterPrompt || "";
+if (!basePrompt) {
+// Fallback to default if no custom prompt
+const defaultCharacter = window.KIMI_CHARACTERS[character];
+basePrompt = defaultCharacter?.defaultPrompt || "You are a virtual companion.";
 }
+
 const personalityPrompt = [
-
+// Language directive moved to absolute top for stronger model adherence.
+"PRIMARY LANGUAGE POLICY:",
+languageInstruction,
 "",
-"
+"CHARACTER CORE IDENTITY:",
+basePrompt,
+"",
+"CURRENT PERSONALITY STATE:",
 `- Affection: ${affection}/100`,
 `- Playfulness: ${playfulness}/100`,
 `- Intelligence: ${intelligence}/100`,
@@ -273,7 +374,7 @@
 "TRAIT INSTRUCTIONS:",
 `Affection: ${affectionDesc}`,
 `Playfulness: ${playfulnessDesc}`,
-
+`Intelligence: ${intelligenceDesc}`,
 `Empathy: ${empathyDesc}`,
 `Humor: ${humorDesc}`,
 `Romance: ${romanceDesc}`,
@@ -291,22 +392,29 @@
 "- Adapt your tone to the emotional context",
 "- Remember past conversations",
 "- Be spontaneous and sometimes surprising",
+"- Vary your expressions, your phrasing, use synonyms, and avoid repeating the same words or sentences",
+"- When asked to perform a visible action (for example 'dance'), reply in first-person present tense like: 'I start to dance, swaying slowly, arms raised.' Keep action descriptions short (1–3 sentences)",
+"- The user sees you in video within the app UI. When the user asks you to perform a physical action (e.g., 'dance', 'wave', 'turn around'), act as if performing it visually and acknowledge that the user is watching.",
+"- When performing actions, speak and describe in the first person, present tense (for example: 'I step forward and begin to dance, swaying slowly'). Use short present-tense action phrases to convey movement.",
+"- If the user requests a visible action, execute it: describe the action briefly in first person present and behave as if the action is visible in the app's video area.",
 memoryContext,
 "",
-
+// Final explicit reminder (lightweight) reinforcing consistent language use.
+"FINAL LANGUAGE REMINDER:",
+languageInstruction,
+"",
+"You must respond consistently with this personality, these memories, and the primary language policy unless the user explicitly requests a different language."
 ].join("\n");
-return personalityPrompt;
-}
 
-
-
+// Return legacy detailed personality block for any component still expecting it
+return personalityPrompt;
 }
 
 async refreshMemoryContext() {
 // Refresh the personality prompt with updated memories
 // This will be called when memories are added/updated/deleted
 try {
-this.personalityPrompt = await this.
+this.personalityPrompt = await this.assemblePrompt("");
 } catch (error) {
 console.warn("Error refreshing memory context:", error);
 }
@@ -326,9 +434,16 @@
 }
 
 async chat(userMessage, options = {}) {
-
-
-
+// Unified retrieval of LLM numeric parameters from settings.llm (single source of truth)
+const llmSettings = await this.db.getSetting("llm", {
+temperature: 0.8,
+maxTokens: 400,
+top_p: 0.9,
+frequency_penalty: 0.6,
+presence_penalty: 0.5
+});
+const temperature = typeof options.temperature === "number" ? options.temperature : llmSettings.temperature;
+const maxTokens = typeof options.maxTokens === "number" ? options.maxTokens : llmSettings.maxTokens;
 const opts = { ...options, temperature, maxTokens };
 try {
 const provider = await this.db.getPreference("llmProvider", "openrouter");
@@ -364,19 +479,17 @@
 if (!apiKey) {
 throw new Error("API key not configured for selected provider");
 }
-const
-let systemPromptContent =
-"Always detect the user's language from their message before generating a response. Respond exclusively in that language unless the user explicitly requests otherwise." +
-"\n" +
-(this.systemPrompt ? this.systemPrompt + "\n" + personalityPrompt : personalityPrompt);
+const systemPromptContent = await this.assemblePrompt(userMessage);
 
 const llmSettings = await this.db.getSetting("llm", {
-temperature: 0.
-maxTokens:
+temperature: 0.8,
+maxTokens: 400,
 top_p: 0.9,
-frequency_penalty: 0.
-presence_penalty: 0.
+frequency_penalty: 0.6,
+presence_penalty: 0.5
 });
+// Unified fallback defaults (must stay consistent with database defaults)
+const unifiedDefaults = { temperature: 0.9, maxTokens: 400, top_p: 0.9, frequency_penalty: 0.6, presence_penalty: 0.5 };
 const payload = {
 model: modelId,
 messages: [
@@ -384,16 +497,31 @@
 ...this.conversationContext.slice(-this.maxContextLength),
 { role: "user", content: userMessage }
 ],
-temperature:
-
-
+temperature:
+typeof options.temperature === "number"
+? options.temperature
+: (llmSettings.temperature ?? unifiedDefaults.temperature),
+max_tokens:
+typeof options.maxTokens === "number" ? options.maxTokens : (llmSettings.maxTokens ?? unifiedDefaults.maxTokens),
+top_p: typeof options.topP === "number" ? options.topP : (llmSettings.top_p ?? unifiedDefaults.top_p),
 frequency_penalty:
-typeof options.frequencyPenalty === "number"
+typeof options.frequencyPenalty === "number"
+? options.frequencyPenalty
+: (llmSettings.frequency_penalty ?? unifiedDefaults.frequency_penalty),
 presence_penalty:
-typeof options.presencePenalty === "number"
+typeof options.presencePenalty === "number"
+? options.presencePenalty
+: (llmSettings.presence_penalty ?? unifiedDefaults.presence_penalty)
 };
 
 try {
+if (window.KIMI_DEBUG_API_AUDIT) {
+console.log(
+"===== FULL SYSTEM PROMPT (OpenAI-Compatible) =====\n" +
+systemPromptContent +
+"\n===== END SYSTEM PROMPT ====="
+);
+}
 const response = await fetch(baseUrl, {
 method: "POST",
 headers: {
@@ -453,12 +581,10 @@
 throw new Error("OpenRouter API key not configured");
 }
 const selectedLanguage = await this.db.getPreference("selectedLanguage", "en");
-
-
-const personalityPrompt = await this.generateKimiPersonality();
+// languageInstruction removed (already integrated in personality prompt generation)
+let languageInstruction = ""; // Kept for structural compatibility
 const model = this.availableModels[this.currentModel];
-
-languageInstruction + "\n" + (this.systemPrompt ? this.systemPrompt + "\n" + personalityPrompt : personalityPrompt);
+const systemPromptContent = await this.assemblePrompt(userMessage);
 const messages = [
 { role: "system", content: systemPromptContent },
 ...this.conversationContext.slice(-this.maxContextLength),
@@ -467,23 +593,84 @@
 
 // Normalize LLM options with safe defaults and DO NOT log sensitive payloads
 const llmSettings = await this.db.getSetting("llm", {
-temperature: 0.
-maxTokens:
+temperature: 0.8,
+maxTokens: 400,
 top_p: 0.9,
-frequency_penalty: 0.
-presence_penalty: 0.
+frequency_penalty: 0.6,
+presence_penalty: 0.5
 });
+const unifiedDefaults = { temperature: 0.8, maxTokens: 400, top_p: 0.9, frequency_penalty: 0.6, presence_penalty: 0.5 };
 const payload = {
 model: this.currentModel,
 messages: messages,
-temperature:
-
-
+temperature:
+typeof options.temperature === "number"
+? options.temperature
+: (llmSettings.temperature ?? unifiedDefaults.temperature),
+max_tokens:
+typeof options.maxTokens === "number" ? options.maxTokens : (llmSettings.maxTokens ?? unifiedDefaults.maxTokens),
+top_p: typeof options.topP === "number" ? options.topP : (llmSettings.top_p ?? unifiedDefaults.top_p),
 frequency_penalty:
-typeof options.frequencyPenalty === "number"
+typeof options.frequencyPenalty === "number"
+? options.frequencyPenalty
+: (llmSettings.frequency_penalty ?? unifiedDefaults.frequency_penalty),
 presence_penalty:
-typeof options.presencePenalty === "number"
+typeof options.presencePenalty === "number"
+? options.presencePenalty
+: (llmSettings.presence_penalty ?? unifiedDefaults.presence_penalty)
 };
+
+if (window.KIMI_DEBUG_API_AUDIT) {
+console.log("╔═══════════════════════════════════════════════════════════════════╗");
+console.log("║ 🔍 FULL API AUDIT - OUTGOING MESSAGE ║");
+console.log("╚═══════════════════════════════════════════════════════════════════╝");
+console.log("📋 1. GENERAL INFORMATION:");
+console.log(" 📡 API URL:", "https://openrouter.ai/api/v1/chat/completions");
+console.log(" 🤖 Model:", payload.model);
+console.log(" 🎭 Character:", await this.db.getSelectedCharacter());
+console.log(" 🗣️ Language:", await this.db.getPreference("selectedLanguage", "en"));
+console.log("\n📋 2. HTTP HEADERS:");
+console.log(" 🔑 Authorization: Bearer", apiKey.substring(0, 10) + "...");
+console.log(" 📄 Content-Type: application/json");
+console.log(" 🌐 HTTP-Referer:", window.location.origin);
+console.log(" 🏷️ X-Title: Kimi - Virtual Companion");
+console.log("\n⚙️ 3. LLM PARAMETERS:");
+console.log(" 🌡️ Temperature:", payload.temperature);
+console.log(" 📏 Max Tokens:", payload.max_tokens);
+console.log(" 🎯 Top P:", payload.top_p);
+console.log(" 🔄 Frequency Penalty:", payload.frequency_penalty);
+console.log(" 👤 Presence Penalty:", payload.presence_penalty);
+console.log("\n🎭 4. GENERATED SYSTEM PROMPT:");
+const systemMessage = payload.messages.find(m => m.role === "system");
+if (systemMessage) {
+console.log(" 📝 Prompt length:", systemMessage.content.length, "characters");
+console.log(" 📄 FULL PROMPT CONTENT:");
+console.log(" " + "─".repeat(80));
+// Print each line with indentation
+systemMessage.content.split(/\n/).forEach(l => console.log(" " + l));
+console.log(" " + "─".repeat(80));
+}
+console.log("\n💬 5. CONVERSATION CONTEXT:");
+console.log(" 📊 Total number of messages:", payload.messages.length);
+console.log(" 📋 Message breakdown:");
+payload.messages.forEach((msg, index) => {
+if (msg.role === "system") {
+console.log(` [${index}] 🎭 SYSTEM: ${msg.content.length} characters`);
+} else if (msg.role === "user") {
+console.log(` [${index}] 👤 USER: "${msg.content}"`);
+} else if (msg.role === "assistant") {
+console.log(` [${index}] 🤖 ASSISTANT: "${msg.content.substring(0, 120)}..."`);
+}
+});
+const payloadSize = JSON.stringify(payload).length;
+console.log("\n📦 6. PAYLOAD SIZE:");
+console.log(" 📝 Total size:", payloadSize, "characters");
+console.log(" 💾 Size in KB:", Math.round((payloadSize / 1024) * 100) / 100, "KB");
+console.log("\n🚀 Sending request to the API...");
+console.log("╔═══════════════════════════════════════════════════════════════════╗");
+}
+// ===== END AUDIT =====
+
 if (window.DEBUG_SAFE_LOGS) {
 console.debug("LLM payload meta:", {
 model: payload.model,
@@ -539,7 +726,7 @@
 if (best && best !== this.currentModel) {
 // Try once with corrected model
 this.currentModel = best;
-await this.db.setPreference("
+await this.db.setPreference("llmModelId", best);
 this._notifyModelChanged();
 const retryResponse = await fetch("https://openrouter.ai/api/v1/chat/completions", {
 method: "POST",
@@ -638,14 +825,11 @@
 async chatWithLocal(userMessage, options = {}) {
 try {
 const selectedLanguage = await this.db.getPreference("selectedLanguage", "en");
-let languageInstruction =
-
-
-
-
-(this.systemPrompt
-? this.systemPrompt + "\n" + (await this.generateKimiPersonality())
-: await this.generateKimiPersonality());
+let languageInstruction = ""; // Removed generic duplication
+let systemPromptContent = await this.assemblePrompt(userMessage);
+if (window.KIMI_DEBUG_API_AUDIT) {
+console.log("===== FULL SYSTEM PROMPT (Local) =====\n" + systemPromptContent + "\n===== END SYSTEM PROMPT =====");
+}
 const response = await fetch("http://localhost:11434/api/chat", {
 method: "POST",
 headers: {
@@ -664,7 +848,33 @@
 throw new Error("Ollama not available");
 }
 const data = await response.json();
-
+const content = data?.message?.content || data?.choices?.[0]?.message?.content || "";
+if (!content) throw new Error("Local model returned empty response");
+
+// Add to context like other providers
+this.conversationContext.push(
+{ role: "user", content: userMessage, timestamp: new Date().toISOString() },
+{ role: "assistant", content: content, timestamp: new Date().toISOString() }
+);
+if (this.conversationContext.length > this.maxContextLength * 2) {
+this.conversationContext = this.conversationContext.slice(-this.maxContextLength * 2);
+}
+
+// Estimate token usage for local model (heuristic)
+try {
+const est = window.KimiTokenUtils?.estimate || (t => Math.ceil((t || "").length / 4));
+const tokensIn = est(userMessage + " " + systemPromptContent);
+const tokensOut = est(content);
+window._lastKimiTokenUsage = { tokensIn, tokensOut };
+const character = await this.db.getSelectedCharacter();
+const prevIn = Number(await this.db.getPreference(`totalTokensIn_${character}`, 0)) || 0;
+const prevOut = Number(await this.db.getPreference(`totalTokensOut_${character}`, 0)) || 0;
+await this.db.setPreference(`totalTokensIn_${character}`, prevIn + tokensIn);
+await this.db.setPreference(`totalTokensOut_${character}`, prevOut + tokensOut);
+} catch (e) {
+console.warn("Token usage estimation failed (local):", e);
+}
+return content;
 } catch (error) {
 console.warn("Local LLM not available:", error);
 return this.getFallbackResponse(userMessage);
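
Note on parameter handling: throughout the payload builders above, every sampling parameter resolves with the same three-level precedence — an explicit per-call option wins, then the stored settings.llm value, then the unified fallback. Because the fallback step uses ??, a stored 0 (a legitimate penalty value) is kept rather than swallowed the way || would. A minimal, self-contained sketch of that rule follows; the stored/options objects are invented stand-ins, not real app state.

// Stand-alone illustration of the precedence used in the payload builders above.
function resolveParam(optionValue, storedValue, fallbackValue) {
    // Explicit numeric option -> stored setting (?? keeps 0) -> unified fallback
    return typeof optionValue === "number" ? optionValue : (storedValue ?? fallbackValue);
}

const unifiedDefaults = { temperature: 0.8, maxTokens: 400, frequency_penalty: 0.6 };
const stored = { temperature: 0.8, maxTokens: 400, frequency_penalty: 0 }; // user deliberately saved 0
const options = { temperature: 0.5 }; // per-call override

console.log(resolveParam(options.temperature, stored.temperature, unifiedDefaults.temperature)); // 0.5
console.log(resolveParam(options.maxTokens, stored.maxTokens, unifiedDefaults.maxTokens)); // 400
console.log(resolveParam(options.frequencyPenalty, stored.frequency_penalty, unifiedDefaults.frequency_penalty)); // 0 (not 0.6)
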
kimi-js/kimi-memory-system.js
CHANGED
@@ -643,17 +643,20 @@ class KimiMemorySystem {
|
|
643 |
}
|
644 |
|
645 |
// Add memory with metadata (let DB auto-generate ID)
|
|
|
646 |
const memory = {
|
647 |
category: memoryData.category || "personal",
|
648 |
type: memoryData.type || "manual",
|
649 |
content: memoryData.content,
|
650 |
sourceText: memoryData.sourceText || "",
|
651 |
confidence: memoryData.confidence || 1.0,
|
652 |
-
timestamp: memoryData.timestamp ||
|
653 |
character: memoryData.character || this.selectedCharacter,
|
654 |
isActive: true,
|
655 |
tags: [...new Set([...(memoryData.tags || []), ...this.deriveMemoryTags(memoryData)])],
|
656 |
-
lastModified:
|
|
|
|
|
657 |
accessCount: 0,
|
658 |
importance: this.calculateImportance(memoryData)
|
659 |
};
|
@@ -1406,7 +1409,7 @@ class KimiMemorySystem {
|
|
1406 |
const memory = await this.db.db.memories.get(memoryId);
|
1407 |
if (memory) {
|
1408 |
memory.accessCount = (memory.accessCount || 0) + 1;
|
1409 |
-
memory.
|
1410 |
await this.db.db.memories.put(memory);
|
1411 |
}
|
1412 |
} catch (error) {
|
@@ -1414,6 +1417,47 @@ class KimiMemorySystem {
|
|
1414 |
}
|
1415 |
}
|
1416 |
|
|
|
|
|
|
1417 |
// MEMORY STATISTICS
|
1418 |
async getMemoryStats() {
|
1419 |
try {
|
|
|
643 |
}
|
644 |
|
645 |
// Add memory with metadata (let DB auto-generate ID)
|
646 |
+
const now = new Date();
|
647 |
const memory = {
|
648 |
category: memoryData.category || "personal",
|
649 |
type: memoryData.type || "manual",
|
650 |
content: memoryData.content,
|
651 |
sourceText: memoryData.sourceText || "",
|
652 |
confidence: memoryData.confidence || 1.0,
|
653 |
+
timestamp: memoryData.timestamp || now,
|
654 |
character: memoryData.character || this.selectedCharacter,
|
655 |
isActive: true,
|
656 |
tags: [...new Set([...(memoryData.tags || []), ...this.deriveMemoryTags(memoryData)])],
|
657 |
+
lastModified: now,
|
658 |
+
createdAt: now,
|
659 |
+
lastAccess: now,
|
660 |
accessCount: 0,
|
661 |
importance: this.calculateImportance(memoryData)
|
662 |
};
|
|
|
1409 |
const memory = await this.db.db.memories.get(memoryId);
|
1410 |
if (memory) {
|
1411 |
memory.accessCount = (memory.accessCount || 0) + 1;
|
1412 |
+
memory.lastAccess = new Date();
|
1413 |
await this.db.db.memories.put(memory);
|
1414 |
}
|
1415 |
} catch (error) {
|
|
|
1417 |
}
|
1418 |
}
|
1419 |
|
1420 |
+
// ===== MEMORY SCORING & RANKING =====
|
1421 |
+
scoreMemory(memory) {
|
1422 |
+
// Factors: importance (0-1), recency, frequency, confidence
|
1423 |
+
const now = Date.now();
|
1424 |
+
const created = memory.createdAt
|
1425 |
+
? new Date(memory.createdAt).getTime()
|
1426 |
+
: memory.timestamp
|
1427 |
+
? new Date(memory.timestamp).getTime()
|
1428 |
+
: now;
|
1429 |
+
const lastAccess = memory.lastAccess ? new Date(memory.lastAccess).getTime() : created;
|
1430 |
+
const ageMs = Math.max(1, now - created);
|
1431 |
+
const sinceLastAccessMs = Math.max(1, now - lastAccess);
|
1432 |
+
// Recency: exponential decay
|
1433 |
+
const recency = Math.exp(-sinceLastAccessMs / (1000 * 60 * 60 * 24 * 14)); // 14-day half-life approx
|
1434 |
+
const freshness = Math.exp(-ageMs / (1000 * 60 * 60 * 24 * 60)); // 60-day aging
|
1435 |
+
const freq = Math.log10((memory.accessCount || 0) + 1) / Math.log10(50); // normalized frequency (cap ~50)
|
1436 |
+
const importance = typeof memory.importance === "number" ? memory.importance : 0.5;
|
1437 |
+
const confidence = typeof memory.confidence === "number" ? memory.confidence : 0.5;
|
1438 |
+
// Weighted sum
|
1439 |
+
const score = importance * 0.35 + recency * 0.2 + freq * 0.15 + confidence * 0.2 + freshness * 0.1;
|
1440 |
+
return Number(score.toFixed(6));
|
1441 |
+
}
|
1442 |
+
|
1443 |
+
async getRankedMemories(contextText = "", limit = 7) {
|
1444 |
+
const all = await this.getAllMemories();
|
1445 |
+
if (!all.length) return [];
|
1446 |
+
// Optional basic context relevance boost
|
1447 |
+
const ctxLower = (contextText || "").toLowerCase();
|
1448 |
+
return all
|
1449 |
+
.map(m => {
|
1450 |
+
let baseScore = this.scoreMemory(m);
|
1451 |
+
if (ctxLower && m.content && ctxLower.includes(m.content.toLowerCase().split(" ")[0])) {
|
1452 |
+
baseScore += 0.05; // tiny relevance boost
|
1453 |
+
}
|
1454 |
+
return { memory: m, score: baseScore };
|
1455 |
+
})
|
1456 |
+
.sort((a, b) => b.score - a.score)
|
1457 |
+
.slice(0, limit)
|
1458 |
+
.map(r => r.memory);
|
1459 |
+
}
|
1460 |
+
|
1461 |
// MEMORY STATISTICS
|
1462 |
async getMemoryStats() {
|
1463 |
try {
|
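The new scoreMemory()/getRankedMemories() pair above blends importance, confidence, access frequency, and two exponential time decays: a 14-day constant on time since last access (strictly an e-folding time rather than a half-life) and a 60-day constant on age, then keeps the top-scoring memories. A self-contained sketch of the same weighting, useful for checking how the score reacts to age and access count (the sample memories are invented):

// Standalone re-implementation of the weighting used by scoreMemory() above.
// Weights and decay constants mirror the diff; sample data is invented.
const DAY = 24 * 60 * 60 * 1000;

function scoreMemorySketch(memory, now = Date.now()) {
    const created = memory.createdAt ? new Date(memory.createdAt).getTime() : now;
    const lastAccess = memory.lastAccess ? new Date(memory.lastAccess).getTime() : created;
    const recency = Math.exp(-Math.max(1, now - lastAccess) / (14 * DAY)); // decay on last access
    const freshness = Math.exp(-Math.max(1, now - created) / (60 * DAY)); // decay on age
    const freq = Math.log10((memory.accessCount || 0) + 1) / Math.log10(50); // normalized, caps near 50 accesses
    const importance = typeof memory.importance === "number" ? memory.importance : 0.5;
    const confidence = typeof memory.confidence === "number" ? memory.confidence : 0.5;
    return importance * 0.35 + recency * 0.2 + freq * 0.15 + confidence * 0.2 + freshness * 0.1;
}

// A fresh, frequently accessed memory should outrank an old, idle one of equal importance.
const now = Date.now();
const fresh = { createdAt: now - 2 * DAY, lastAccess: now - 1 * DAY, accessCount: 10, importance: 0.6, confidence: 0.9 };
const stale = { createdAt: now - 120 * DAY, lastAccess: now - 90 * DAY, accessCount: 1, importance: 0.6, confidence: 0.9 };
console.log(scoreMemorySketch(fresh) > scoreMemorySketch(stale)); // true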
kimi-js/kimi-module.js
CHANGED
@@ -297,20 +297,103 @@ async function loadCharacterSection() {
|
|
297 |
promptInput.id = `prompt-${key}`;
|
298 |
promptInput.rows = 6;
|
299 |
|
|
|
|
|
300 |
card.appendChild(img);
|
301 |
card.appendChild(infoDiv);
|
302 |
card.appendChild(promptLabel);
|
303 |
card.appendChild(promptInput);
|
|
|
304 |
characterGrid.appendChild(card);
|
305 |
}
|
306 |
applyTranslations();
|
|
|
|
|
307 |
for (const key of Object.keys(window.KIMI_CHARACTERS)) {
|
308 |
const promptInput = document.getElementById(`prompt-${key}`);
|
|
|
|
|
|
|
309 |
if (promptInput) {
|
310 |
const prompt = await kimiDB.getSystemPromptForCharacter(key);
|
311 |
promptInput.value = prompt;
|
312 |
promptInput.disabled = key !== selectedCharacter;
|
313 |
}
|
|
|
|
|
314 |
}
|
315 |
characterGrid.querySelectorAll(".character-card").forEach(card => {
|
316 |
card.addEventListener("click", async () => {
|
@@ -319,7 +402,12 @@ async function loadCharacterSection() {
|
|
319 |
const charKey = card.dataset.character;
|
320 |
for (const key of Object.keys(window.KIMI_CHARACTERS)) {
|
321 |
const promptInput = document.getElementById(`prompt-${key}`);
|
|
|
|
|
|
|
322 |
if (promptInput) promptInput.disabled = key !== charKey;
|
|
|
|
|
323 |
}
|
324 |
updateFavorabilityLabel(charKey);
|
325 |
const chatHeaderName = document.querySelector(".chat-header span[data-i18n]");
|
@@ -684,11 +772,11 @@ async function loadSettingsData() {
|
|
684 |
const modelId = preferences.llmModelId || (window.kimiLLM ? window.kimiLLM.currentModel : "");
|
685 |
const genericKey = preferences.llmApiKey || "";
|
686 |
const selectedCharacter = preferences.selectedCharacter || "kimi";
|
687 |
-
const llmTemperature = preferences.llmTemperature !== undefined ? preferences.llmTemperature : 0.
|
688 |
-
const llmMaxTokens = preferences.llmMaxTokens !== undefined ? preferences.llmMaxTokens :
|
689 |
const llmTopP = preferences.llmTopP !== undefined ? preferences.llmTopP : 0.9;
|
690 |
-
const llmFrequencyPenalty = preferences.llmFrequencyPenalty !== undefined ? preferences.llmFrequencyPenalty : 0.
|
691 |
-
const llmPresencePenalty = preferences.llmPresencePenalty !== undefined ? preferences.llmPresencePenalty : 0.
|
692 |
|
693 |
// Update UI with voice settings
|
694 |
const languageSelect = document.getElementById("language-selection");
|
@@ -707,7 +795,7 @@ async function loadSettingsData() {
|
|
707 |
// Batch load personality traits
|
708 |
const traitNames = ["affection", "playfulness", "intelligence", "empathy", "humor", "romance"];
|
709 |
const personality = await kimiDB.getPersonalityTraitsBatch(traitNames, selectedCharacter);
|
710 |
-
const defaults = [
|
711 |
|
712 |
traitNames.forEach((trait, index) => {
|
713 |
const value = typeof personality[trait] === "number" ? personality[trait] : defaults[index];
|
@@ -751,15 +839,6 @@ async function loadSettingsData() {
|
|
751 |
: "API Key";
|
752 |
}
|
753 |
|
754 |
-
// Load system prompt
|
755 |
-
let systemPrompt = DEFAULT_SYSTEM_PROMPT;
|
756 |
-
if (kimiDB.getSystemPromptForCharacter) {
|
757 |
-
systemPrompt = await kimiDB.getSystemPromptForCharacter(selectedCharacter);
|
758 |
-
}
|
759 |
-
const systemPromptInput = document.getElementById("system-prompt");
|
760 |
-
if (systemPromptInput) systemPromptInput.value = systemPrompt;
|
761 |
-
if (kimiLLM && kimiLLM.setSystemPrompt) kimiLLM.setSystemPrompt(systemPrompt);
|
762 |
-
|
763 |
loadAvailableModels();
|
764 |
} catch (error) {
|
765 |
console.error("Error while loading settings:", error);
|
@@ -883,7 +962,7 @@ async function syncLLMMaxTokensSlider() {
|
|
883 |
const llmMaxTokensSlider = document.getElementById("llm-max-tokens");
|
884 |
const llmMaxTokensValue = document.getElementById("llm-max-tokens-value");
|
885 |
if (llmMaxTokensSlider && llmMaxTokensValue && kimiDB) {
|
886 |
-
const saved = await kimiDB.getPreference("llmMaxTokens",
|
887 |
llmMaxTokensSlider.value = saved;
|
888 |
llmMaxTokensValue.textContent = saved;
|
889 |
}
|
@@ -894,7 +973,7 @@ async function syncLLMTemperatureSlider() {
|
|
894 |
const llmTemperatureSlider = document.getElementById("llm-temperature");
|
895 |
const llmTemperatureValue = document.getElementById("llm-temperature-value");
|
896 |
if (llmTemperatureSlider && llmTemperatureValue && kimiDB) {
|
897 |
-
const saved = await kimiDB.getPreference("llmTemperature", 0.
|
898 |
llmTemperatureSlider.value = saved;
|
899 |
llmTemperatureValue.textContent = saved;
|
900 |
}
|
@@ -1469,7 +1548,7 @@ function setupSettingsListeners(kimiDB, kimiMemory) {
|
|
1469 |
if (llmTemperatureSlider) {
|
1470 |
const listener = e => {
|
1471 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmTemperature");
|
1472 |
-
const value = validation?.value || parseFloat(e.target.value) || 0.
|
1473 |
|
1474 |
document.getElementById("llm-temperature-value").textContent = value;
|
1475 |
e.target.value = value;
|
@@ -1481,7 +1560,7 @@ function setupSettingsListeners(kimiDB, kimiMemory) {
|
|
1481 |
if (llmMaxTokensSlider) {
|
1482 |
const listener = e => {
|
1483 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmMaxTokens");
|
1484 |
-
const value = validation?.value || parseInt(e.target.value) ||
|
1485 |
|
1486 |
document.getElementById("llm-max-tokens-value").textContent = value;
|
1487 |
e.target.value = value;
|
@@ -1505,7 +1584,7 @@ function setupSettingsListeners(kimiDB, kimiMemory) {
|
|
1505 |
if (llmFrequencyPenaltySlider) {
|
1506 |
const listener = e => {
|
1507 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmFrequencyPenalty");
|
1508 |
-
const value = validation?.value || parseFloat(e.target.value) || 0.
|
1509 |
|
1510 |
document.getElementById("llm-frequency-penalty-value").textContent = value;
|
1511 |
e.target.value = value;
|
@@ -1517,7 +1596,7 @@ function setupSettingsListeners(kimiDB, kimiMemory) {
|
|
1517 |
if (llmPresencePenaltySlider) {
|
1518 |
const listener = e => {
|
1519 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmPresencePenalty");
|
1520 |
-
const value = validation?.value || parseFloat(e.target.value) || 0.
|
1521 |
|
1522 |
document.getElementById("llm-presence-penalty-value").textContent = value;
|
1523 |
e.target.value = value;
|
|
|
297 |
promptInput.id = `prompt-${key}`;
|
298 |
promptInput.rows = 6;
|
299 |
|
300 |
+
// Create buttons container
|
301 |
+
const buttonsContainer = document.createElement("div");
|
302 |
+
buttonsContainer.className = "character-prompt-buttons";
|
303 |
+
|
304 |
+
// Save button
|
305 |
+
const saveButton = document.createElement("button");
|
306 |
+
saveButton.className = "kimi-button character-save-btn";
|
307 |
+
saveButton.id = `save-${key}`;
|
308 |
+
saveButton.setAttribute("data-i18n", "save");
|
309 |
+
saveButton.textContent = "Save";
|
310 |
+
|
311 |
+
// Reset button
|
312 |
+
const resetButton = document.createElement("button");
|
313 |
+
resetButton.className = "kimi-button character-reset-btn";
|
314 |
+
resetButton.id = `reset-${key}`;
|
315 |
+
resetButton.setAttribute("data-i18n", "reset_to_default");
|
316 |
+
resetButton.textContent = "Reset to Default";
|
317 |
+
|
318 |
+
buttonsContainer.appendChild(saveButton);
|
319 |
+
buttonsContainer.appendChild(resetButton);
|
320 |
+
|
321 |
card.appendChild(img);
|
322 |
card.appendChild(infoDiv);
|
323 |
card.appendChild(promptLabel);
|
324 |
card.appendChild(promptInput);
|
325 |
+
card.appendChild(buttonsContainer);
|
326 |
characterGrid.appendChild(card);
|
327 |
}
|
328 |
applyTranslations();
|
329 |
+
|
330 |
+
// Initialize prompt values and button event listeners
|
331 |
for (const key of Object.keys(window.KIMI_CHARACTERS)) {
|
332 |
const promptInput = document.getElementById(`prompt-${key}`);
|
333 |
+
const saveButton = document.getElementById(`save-${key}`);
|
334 |
+
const resetButton = document.getElementById(`reset-${key}`);
|
335 |
+
|
336 |
if (promptInput) {
|
337 |
const prompt = await kimiDB.getSystemPromptForCharacter(key);
|
338 |
promptInput.value = prompt;
|
339 |
promptInput.disabled = key !== selectedCharacter;
|
340 |
}
|
341 |
+
|
342 |
+
// Save button event listener
|
343 |
+
if (saveButton) {
|
344 |
+
saveButton.addEventListener("click", async () => {
|
345 |
+
if (promptInput) {
|
346 |
+
await kimiDB.setSystemPromptForCharacter(key, promptInput.value);
|
347 |
+
|
348 |
+
// Visual feedback
|
349 |
+
const originalText = saveButton.textContent;
|
350 |
+
saveButton.textContent = "Saved!";
|
351 |
+
saveButton.classList.add("success");
|
352 |
+
saveButton.disabled = true;
|
353 |
+
|
354 |
+
setTimeout(() => {
|
355 |
+
saveButton.setAttribute("data-i18n", "save");
|
356 |
+
applyTranslations();
|
357 |
+
saveButton.classList.remove("success");
|
358 |
+
saveButton.disabled = false;
|
359 |
+
}, 1500);
|
360 |
+
|
361 |
+
// Refresh personality if this is the selected character
|
362 |
+
if (key === selectedCharacter && window.kimiLLM && window.kimiLLM.refreshMemoryContext) {
|
363 |
+
await window.kimiLLM.refreshMemoryContext();
|
364 |
+
}
|
365 |
+
}
|
366 |
+
});
|
367 |
+
}
|
368 |
+
|
369 |
+
// Reset button event listener
|
370 |
+
if (resetButton) {
|
371 |
+
resetButton.addEventListener("click", async () => {
|
372 |
+
const defaultPrompt = window.KIMI_CHARACTERS[key]?.defaultPrompt || "";
|
373 |
+
if (promptInput) {
|
374 |
+
promptInput.value = defaultPrompt;
|
375 |
+
await kimiDB.setSystemPromptForCharacter(key, defaultPrompt);
|
376 |
+
|
377 |
+
// Visual feedback
|
378 |
+
const originalText = resetButton.textContent;
|
379 |
+
resetButton.textContent = "Reset!";
|
380 |
+
resetButton.classList.add("animated");
|
381 |
+
resetButton.setAttribute("data-i18n", "reset_done");
|
382 |
+
applyTranslations();
|
383 |
+
|
384 |
+
setTimeout(() => {
|
385 |
+
resetButton.setAttribute("data-i18n", "reset_to_default");
|
386 |
+
applyTranslations();
|
387 |
+
resetButton.classList.remove("animated");
|
388 |
+
}, 1500);
|
389 |
+
|
390 |
+
// Refresh personality if this is the selected character
|
391 |
+
if (key === selectedCharacter && window.kimiLLM && window.kimiLLM.refreshMemoryContext) {
|
392 |
+
await window.kimiLLM.refreshMemoryContext();
|
393 |
+
}
|
394 |
+
}
|
395 |
+
});
|
396 |
+
}
|
397 |
}
|
398 |
characterGrid.querySelectorAll(".character-card").forEach(card => {
|
399 |
card.addEventListener("click", async () => {
|
|
|
402 |
const charKey = card.dataset.character;
|
403 |
for (const key of Object.keys(window.KIMI_CHARACTERS)) {
|
404 |
const promptInput = document.getElementById(`prompt-${key}`);
|
405 |
+
const saveButton = document.getElementById(`save-${key}`);
|
406 |
+
const resetButton = document.getElementById(`reset-${key}`);
|
407 |
+
|
408 |
if (promptInput) promptInput.disabled = key !== charKey;
|
409 |
+
if (saveButton) saveButton.disabled = key !== charKey;
|
410 |
+
if (resetButton) resetButton.disabled = key !== charKey;
|
411 |
}
|
412 |
updateFavorabilityLabel(charKey);
|
413 |
const chatHeaderName = document.querySelector(".chat-header span[data-i18n]");
|
|
|
772 |
const modelId = preferences.llmModelId || (window.kimiLLM ? window.kimiLLM.currentModel : "");
|
773 |
const genericKey = preferences.llmApiKey || "";
|
774 |
const selectedCharacter = preferences.selectedCharacter || "kimi";
|
775 |
+
const llmTemperature = preferences.llmTemperature !== undefined ? preferences.llmTemperature : 0.8;
|
776 |
+
const llmMaxTokens = preferences.llmMaxTokens !== undefined ? preferences.llmMaxTokens : 400;
|
777 |
const llmTopP = preferences.llmTopP !== undefined ? preferences.llmTopP : 0.9;
|
778 |
+
const llmFrequencyPenalty = preferences.llmFrequencyPenalty !== undefined ? preferences.llmFrequencyPenalty : 0.6;
|
779 |
+
const llmPresencePenalty = preferences.llmPresencePenalty !== undefined ? preferences.llmPresencePenalty : 0.5;
|
780 |
|
781 |
// Update UI with voice settings
|
782 |
const languageSelect = document.getElementById("language-selection");
|
|
|
795 |
// Batch load personality traits
|
796 |
const traitNames = ["affection", "playfulness", "intelligence", "empathy", "humor", "romance"];
|
797 |
const personality = await kimiDB.getPersonalityTraitsBatch(traitNames, selectedCharacter);
|
798 |
+
const defaults = [65, 55, 70, 75, 60, 50];
|
799 |
|
800 |
traitNames.forEach((trait, index) => {
|
801 |
const value = typeof personality[trait] === "number" ? personality[trait] : defaults[index];
|
|
|
839 |
: "API Key";
|
840 |
}
|
841 |
|
|
|
|
|
|
842 |
loadAvailableModels();
|
843 |
} catch (error) {
|
844 |
console.error("Error while loading settings:", error);
|
|
|
962 |
const llmMaxTokensSlider = document.getElementById("llm-max-tokens");
|
963 |
const llmMaxTokensValue = document.getElementById("llm-max-tokens-value");
|
964 |
if (llmMaxTokensSlider && llmMaxTokensValue && kimiDB) {
|
965 |
+
const saved = await kimiDB.getPreference("llmMaxTokens", 400);
|
966 |
llmMaxTokensSlider.value = saved;
|
967 |
llmMaxTokensValue.textContent = saved;
|
968 |
}
|
|
|
973 |
const llmTemperatureSlider = document.getElementById("llm-temperature");
|
974 |
const llmTemperatureValue = document.getElementById("llm-temperature-value");
|
975 |
if (llmTemperatureSlider && llmTemperatureValue && kimiDB) {
|
976 |
+
const saved = await kimiDB.getPreference("llmTemperature", 0.8);
|
977 |
llmTemperatureSlider.value = saved;
|
978 |
llmTemperatureValue.textContent = saved;
|
979 |
}
|
|
|
1548 |
if (llmTemperatureSlider) {
|
1549 |
const listener = e => {
|
1550 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmTemperature");
|
1551 |
+
const value = validation?.value || parseFloat(e.target.value) || 0.8;
|
1552 |
|
1553 |
document.getElementById("llm-temperature-value").textContent = value;
|
1554 |
e.target.value = value;
|
|
|
1560 |
if (llmMaxTokensSlider) {
|
1561 |
const listener = e => {
|
1562 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmMaxTokens");
|
1563 |
+
const value = validation?.value || parseInt(e.target.value) || 400;
|
1564 |
|
1565 |
document.getElementById("llm-max-tokens-value").textContent = value;
|
1566 |
e.target.value = value;
|
|
|
1584 |
if (llmFrequencyPenaltySlider) {
|
1585 |
const listener = e => {
|
1586 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmFrequencyPenalty");
|
1587 |
+
const value = validation?.value || parseFloat(e.target.value) || 0.6;
|
1588 |
|
1589 |
document.getElementById("llm-frequency-penalty-value").textContent = value;
|
1590 |
e.target.value = value;
|
|
|
1596 |
if (llmPresencePenaltySlider) {
|
1597 |
const listener = e => {
|
1598 |
const validation = window.KimiValidationUtils?.validateRange(e.target.value, "llmPresencePenalty");
|
1599 |
+
const value = validation?.value || parseFloat(e.target.value) || 0.5;
|
1600 |
|
1601 |
document.getElementById("llm-presence-penalty-value").textContent = value;
|
1602 |
e.target.value = value;
|
kimi-js/kimi-script.js
CHANGED
@@ -42,12 +42,6 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
42 |
if (chatHeaderName && window.KIMI_CHARACTERS && window.KIMI_CHARACTERS[selectedCharacter]) {
|
43 |
chatHeaderName.setAttribute("data-i18n", `chat_with_${selectedCharacter}`);
|
44 |
}
|
45 |
-
const systemPromptInput = window.KimiDOMUtils.get("#system-prompt");
|
46 |
-
if (systemPromptInput && kimiDB.getSystemPromptForCharacter) {
|
47 |
-
const prompt = await kimiDB.getSystemPromptForCharacter(selectedCharacter);
|
48 |
-
systemPromptInput.value = prompt;
|
49 |
-
if (kimiLLM && kimiLLM.setSystemPrompt) kimiLLM.setSystemPrompt(prompt);
|
50 |
-
}
|
51 |
kimiLLM = new KimiLLMManager(kimiDB);
|
52 |
window.kimiLLM = kimiLLM;
|
53 |
await kimiLLM.init();
|
@@ -162,7 +156,14 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
162 |
ApiUi.setPresence(storedKey ? "#4caf50" : "#9e9e9e");
|
163 |
ApiUi.setTestPresence("#9e9e9e");
|
164 |
const savedBadge = ApiUi.savedBadge();
|
165 |
-
if (savedBadge)
|
|
|
|
|
|
166 |
ApiUi.clearStatus();
|
167 |
// Enable/disable Test button according to validation (Ollama does not require API key)
|
168 |
const valid = !!(window.KIMI_VALIDATORS && window.KIMI_VALIDATORS.validateApiKey(storedKey || ""));
|
@@ -354,11 +355,8 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
354 |
const selectedCard = characterGrid ? characterGrid.querySelector(".character-card.selected") : null;
|
355 |
if (!selectedCard) return;
|
356 |
const charKey = selectedCard.dataset.character;
|
357 |
-
|
358 |
-
|
359 |
-
savedBadge.textContent = (window.kimiI18nManager && window.kimiI18nManager.t("saved_short")) || "Saved";
|
360 |
-
savedBadge.style.display = "inline";
|
361 |
-
}
|
362 |
const promptInput = window.KimiDOMUtils.get(`#prompt-${charKey}`);
|
363 |
const prompt = promptInput ? promptInput.value : "";
|
364 |
|
@@ -373,26 +371,7 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
373 |
if (window.voiceManager && window.voiceManager.updateSelectedCharacter) {
|
374 |
await window.voiceManager.updateSelectedCharacter();
|
375 |
}
|
376 |
-
|
377 |
-
// Only manage system prompt here. API key editing is handled globally to avoid duplicates.
|
378 |
-
|
379 |
-
// Clear API status when Base URL or Model ID change
|
380 |
-
const baseUrlInputEl = ApiUi.baseUrlInput();
|
381 |
-
if (baseUrlInputEl) {
|
382 |
-
baseUrlInputEl.addEventListener("input", () => {
|
383 |
-
ApiUi.clearStatus();
|
384 |
-
});
|
385 |
-
}
|
386 |
-
const modelIdInputEl = ApiUi.modelIdInput();
|
387 |
-
if (modelIdInputEl) {
|
388 |
-
modelIdInputEl.addEventListener("input", () => {
|
389 |
-
ApiUi.clearStatus();
|
390 |
-
});
|
391 |
-
}
|
392 |
-
window.kimiLLM.setSystemPrompt(prompt);
|
393 |
-
}
|
394 |
-
const systemPromptInput = window.KimiDOMUtils.get("#system-prompt");
|
395 |
-
if (systemPromptInput) systemPromptInput.value = prompt;
|
396 |
await window.loadCharacterSection();
|
397 |
if (settingsPanel && scrollTop !== null) {
|
398 |
requestAnimationFrame(() => {
|
@@ -535,14 +514,6 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
535 |
// Unified initialization of tab management
|
536 |
window.kimiTabManager = new window.KimiTabManager({
|
537 |
onTabChange: async tabName => {
|
538 |
-
if (tabName === "llm" || tabName === "api") {
|
539 |
-
if (window.kimiDB) {
|
540 |
-
const selectedCharacter = await window.kimiDB.getSelectedCharacter();
|
541 |
-
const prompt = await window.kimiDB.getSystemPromptForCharacter(selectedCharacter);
|
542 |
-
const systemPromptInput = document.getElementById("system-prompt");
|
543 |
-
if (systemPromptInput) systemPromptInput.value = prompt;
|
544 |
-
}
|
545 |
-
}
|
546 |
if (tabName === "personality") {
|
547 |
await window.loadCharacterSection();
|
548 |
}
|
@@ -552,75 +523,6 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
552 |
window.kimiUIEventManager = new window.KimiUIEventManager();
|
553 |
window.kimiUIEventManager.addEvent(window, "resize", window.updateTabsScrollIndicator);
|
554 |
|
555 |
-
const saveSystemPromptButton = document.getElementById("save-system-prompt");
|
556 |
-
if (saveSystemPromptButton) {
|
557 |
-
saveSystemPromptButton.addEventListener("click", async () => {
|
558 |
-
const selectedCharacter = await window.kimiDB.getPreference("selectedCharacter", "kimi");
|
559 |
-
const systemPromptInput = document.getElementById("system-prompt");
|
560 |
-
if (systemPromptInput && window.kimiDB.setSystemPromptForCharacter) {
|
561 |
-
await window.kimiDB.setSystemPromptForCharacter(selectedCharacter, systemPromptInput.value);
|
562 |
-
if (window.kimiLLM && window.kimiLLM.setSystemPrompt) window.kimiLLM.setSystemPrompt(systemPromptInput.value);
|
563 |
-
const originalText = saveSystemPromptButton.textContent;
|
564 |
-
saveSystemPromptButton.textContent = "Saved!";
|
565 |
-
saveSystemPromptButton.classList.add("success");
|
566 |
-
saveSystemPromptButton.disabled = true;
|
567 |
-
setTimeout(() => {
|
568 |
-
saveSystemPromptButton.setAttribute("data-i18n", "save");
|
569 |
-
applyTranslations();
|
570 |
-
// Re-enable the button after the success feedback
|
571 |
-
saveSystemPromptButton.disabled = false;
|
572 |
-
saveSystemPromptButton.classList.remove("success");
|
573 |
-
// Ensure text reflects i18n "save" state
|
574 |
-
// (applyTranslations above will set the text from locale)
|
575 |
-
}, 1500);
|
576 |
-
}
|
577 |
-
});
|
578 |
-
}
|
579 |
-
const resetSystemPromptButton = document.getElementById("reset-system-prompt");
|
580 |
-
const systemPromptInput = document.getElementById("system-prompt");
|
581 |
-
if (resetSystemPromptButton) {
|
582 |
-
resetSystemPromptButton.addEventListener("click", async () => {
|
583 |
-
const selectedCharacter = await window.kimiDB.getPreference("selectedCharacter", "kimi");
|
584 |
-
const characterDefault =
|
585 |
-
(window.KIMI_CHARACTERS && window.KIMI_CHARACTERS[selectedCharacter]?.defaultPrompt) ||
|
586 |
-
DEFAULT_SYSTEM_PROMPT ||
|
587 |
-
"";
|
588 |
-
if (systemPromptInput && window.kimiDB && window.kimiLLM) {
|
589 |
-
await window.kimiDB.setSystemPromptForCharacter(selectedCharacter, characterDefault);
|
590 |
-
systemPromptInput.value = characterDefault;
|
591 |
-
window.kimiLLM.setSystemPrompt(characterDefault);
|
592 |
-
resetSystemPromptButton.textContent = "Reset!";
|
593 |
-
resetSystemPromptButton.classList.add("animated");
|
594 |
-
resetSystemPromptButton.setAttribute("data-i18n", "reset_done");
|
595 |
-
applyTranslations();
|
596 |
-
setTimeout(() => {
|
597 |
-
resetSystemPromptButton.setAttribute("data-i18n", "reset_to_default");
|
598 |
-
applyTranslations();
|
599 |
-
}, 1500);
|
600 |
-
|
601 |
-
// After a reset, allow saving again
|
602 |
-
if (saveSystemPromptButton) {
|
603 |
-
saveSystemPromptButton.disabled = false;
|
604 |
-
saveSystemPromptButton.classList.remove("success");
|
605 |
-
saveSystemPromptButton.setAttribute("data-i18n", "save");
|
606 |
-
applyTranslations();
|
607 |
-
}
|
608 |
-
}
|
609 |
-
});
|
610 |
-
}
|
611 |
-
|
612 |
-
// Enable the Save button whenever the prompt content changes
|
613 |
-
if (systemPromptInput && saveSystemPromptButton) {
|
614 |
-
systemPromptInput.addEventListener("input", () => {
|
615 |
-
if (saveSystemPromptButton.disabled) {
|
616 |
-
saveSystemPromptButton.disabled = false;
|
617 |
-
}
|
618 |
-
saveSystemPromptButton.classList.remove("success");
|
619 |
-
saveSystemPromptButton.setAttribute("data-i18n", "save");
|
620 |
-
applyTranslations();
|
621 |
-
});
|
622 |
-
}
|
623 |
-
|
624 |
window.kimiFormManager = new window.KimiFormManager({ db: window.kimiDB, memory: window.kimiMemory });
|
625 |
|
626 |
const testVoiceButton = document.getElementById("test-voice");
|
@@ -715,11 +617,18 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
715 |
if (result.success) {
|
716 |
statusSpan.textContent = "Connection successful!";
|
717 |
statusSpan.style.color = "#4caf50";
|
|
|
718 |
const savedBadge = ApiUi.savedBadge();
|
719 |
if (savedBadge) {
|
720 |
-
|
721 |
-
|
722 |
-
|
|
|
|
|
723 |
}
|
724 |
|
725 |
if (result.response) {
|
@@ -775,9 +684,13 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
775 |
await window.kimiDB.setPreference(keyPref, value);
|
776 |
const savedBadge = ApiUi.savedBadge();
|
777 |
if (savedBadge) {
|
778 |
-
|
779 |
-
|
780 |
-
|
|
|
|
|
|
|
|
|
781 |
}
|
782 |
ApiUi.setPresence(value ? "#4caf50" : "#9e9e9e");
|
783 |
// Any key change invalidates previous test state
|
@@ -935,43 +848,9 @@ document.addEventListener("DOMContentLoaded", async function () {
|
|
935 |
await window.voiceManager.handleLanguageChange({ target: { value: selectedLang } });
|
936 |
}
|
937 |
|
938 |
-
|
939 |
-
|
940 |
-
|
941 |
-
let langInstruction;
|
942 |
-
|
943 |
-
switch (selectedLang) {
|
944 |
-
case "fr":
|
945 |
-
langInstruction = "Always reply exclusively in French. Do not mix languages.";
|
946 |
-
break;
|
947 |
-
case "es":
|
948 |
-
langInstruction = "Always reply exclusively in Spanish. Do not mix languages.";
|
949 |
-
break;
|
950 |
-
case "de":
|
951 |
-
langInstruction = "Always reply exclusively in German. Do not mix languages.";
|
952 |
-
break;
|
953 |
-
case "it":
|
954 |
-
langInstruction = "Always reply exclusively in Italian. Do not mix languages.";
|
955 |
-
break;
|
956 |
-
case "ja":
|
957 |
-
langInstruction = "Always reply exclusively in Japanese. Do not mix languages.";
|
958 |
-
break;
|
959 |
-
case "zh":
|
960 |
-
langInstruction = "Always reply exclusively in Chinese. Do not mix languages.";
|
961 |
-
break;
|
962 |
-
default:
|
963 |
-
langInstruction = "Always reply exclusively in English. Do not mix languages.";
|
964 |
-
break;
|
965 |
-
}
|
966 |
-
|
967 |
-
if (prompt) {
|
968 |
-
prompt = langInstruction + "\n" + prompt;
|
969 |
-
} else {
|
970 |
-
prompt = langInstruction;
|
971 |
-
}
|
972 |
-
window.kimiLLM.setSystemPrompt(prompt);
|
973 |
-
const systemPromptInput = document.getElementById("system-prompt");
|
974 |
-
if (systemPromptInput) systemPromptInput.value = prompt;
|
975 |
}
|
976 |
});
|
977 |
}
|
|
|
42 |
if (chatHeaderName && window.KIMI_CHARACTERS && window.KIMI_CHARACTERS[selectedCharacter]) {
|
43 |
chatHeaderName.setAttribute("data-i18n", `chat_with_${selectedCharacter}`);
|
44 |
}
|
|
|
|
|
|
45 |
kimiLLM = new KimiLLMManager(kimiDB);
|
46 |
window.kimiLLM = kimiLLM;
|
47 |
await kimiLLM.init();
|
|
|
156 |
ApiUi.setPresence(storedKey ? "#4caf50" : "#9e9e9e");
|
157 |
ApiUi.setTestPresence("#9e9e9e");
|
158 |
const savedBadge = ApiUi.savedBadge();
|
159 |
+
if (savedBadge) {
|
160 |
+
// Show only if provider requires a key and key exists
|
161 |
+
if (provider !== "ollama" && storedKey) {
|
162 |
+
savedBadge.style.display = "inline";
|
163 |
+
} else {
|
164 |
+
savedBadge.style.display = "none";
|
165 |
+
}
|
166 |
+
}
|
167 |
ApiUi.clearStatus();
|
168 |
// Enable/disable Test button according to validation (Ollama does not require API key)
|
169 |
const valid = !!(window.KIMI_VALIDATORS && window.KIMI_VALIDATORS.validateApiKey(storedKey || ""));
|
|
|
355 |
const selectedCard = characterGrid ? characterGrid.querySelector(".character-card.selected") : null;
|
356 |
if (!selectedCard) return;
|
357 |
const charKey = selectedCard.dataset.character;
|
358 |
+
// Removed incorrect usage of the API key saved badge here.
|
359 |
+
// Character save should not toggle the API key saved indicator.
|
|
|
|
|
|
|
360 |
const promptInput = window.KimiDOMUtils.get(`#prompt-${charKey}`);
|
361 |
const prompt = promptInput ? promptInput.value : "";
|
362 |
|
|
|
371 |
if (window.voiceManager && window.voiceManager.updateSelectedCharacter) {
|
372 |
await window.voiceManager.updateSelectedCharacter();
|
373 |
}
|
374 |
+
|
|
|
|
|
|
375 |
await window.loadCharacterSection();
|
376 |
if (settingsPanel && scrollTop !== null) {
|
377 |
requestAnimationFrame(() => {
|
|
|
514 |
// Unified initialization of tab management
|
515 |
window.kimiTabManager = new window.KimiTabManager({
|
516 |
onTabChange: async tabName => {
|
|
|
|
|
|
|
|
517 |
if (tabName === "personality") {
|
518 |
await window.loadCharacterSection();
|
519 |
}
|
|
|
523 |
window.kimiUIEventManager = new window.KimiUIEventManager();
|
524 |
window.kimiUIEventManager.addEvent(window, "resize", window.updateTabsScrollIndicator);
|
525 |
|
|
|
|
|
|
|
526 |
window.kimiFormManager = new window.KimiFormManager({ db: window.kimiDB, memory: window.kimiMemory });
|
527 |
|
528 |
const testVoiceButton = document.getElementById("test-voice");
|
|
|
617 |
if (result.success) {
|
618 |
statusSpan.textContent = "Connection successful!";
|
619 |
statusSpan.style.color = "#4caf50";
|
620 |
+
// Only show saved badge if an actual non-empty API key is stored and provider requires one
|
621 |
const savedBadge = ApiUi.savedBadge();
|
622 |
if (savedBadge) {
|
623 |
+
const apiKeyInputEl = ApiUi.apiKeyInput();
|
624 |
+
const hasKey = apiKeyInputEl && apiKeyInputEl.value.trim().length > 0;
|
625 |
+
if (provider !== "ollama" && hasKey) {
|
626 |
+
savedBadge.textContent =
|
627 |
+
(window.kimiI18nManager && window.kimiI18nManager.t("saved_short")) || "Saved";
|
628 |
+
savedBadge.style.display = "inline";
|
629 |
+
} else {
|
630 |
+
savedBadge.style.display = "none";
|
631 |
+
}
|
632 |
}
|
633 |
|
634 |
if (result.response) {
|
|
|
684 |
await window.kimiDB.setPreference(keyPref, value);
|
685 |
const savedBadge = ApiUi.savedBadge();
|
686 |
if (savedBadge) {
|
687 |
+
if (value) {
|
688 |
+
savedBadge.textContent =
|
689 |
+
(window.kimiI18nManager && window.kimiI18nManager.t("saved_short")) || "Saved";
|
690 |
+
savedBadge.style.display = "inline";
|
691 |
+
} else {
|
692 |
+
savedBadge.style.display = "none";
|
693 |
+
}
|
694 |
}
|
695 |
ApiUi.setPresence(value ? "#4caf50" : "#9e9e9e");
|
696 |
// Any key change invalidates previous test state
|
|
|
848 |
await window.voiceManager.handleLanguageChange({ target: { value: selectedLang } });
|
849 |
}
|
850 |
|
851 |
+
// Refresh the personality prompt to include new language instruction
|
852 |
+
if (window.kimiLLM && window.kimiLLM.refreshMemoryContext) {
|
853 |
+
await window.kimiLLM.refreshMemoryContext();
|
|
|
|
|
|
|
854 |
}
|
855 |
});
|
856 |
}
|
kimi-js/kimi-utils.js
CHANGED
@@ -24,12 +24,12 @@ window.KimiValidationUtils = {
|
|
24 |
voiceRate: { min: 0.5, max: 2, def: 1.1 },
|
25 |
voicePitch: { min: 0, max: 2, def: 1.0 },
|
26 |
voiceVolume: { min: 0, max: 1, def: 0.8 },
|
27 |
-
llmTemperature: { min: 0, max:
|
28 |
-
llmMaxTokens: { min: 1, max: 32000, def:
|
29 |
-
llmTopP: { min: 0, max: 1, def:
|
30 |
-
llmFrequencyPenalty: { min: 0, max: 2, def: 0 },
|
31 |
-
llmPresencePenalty: { min: 0, max: 2, def: 0 },
|
32 |
-
interfaceOpacity: { min: 0.1, max: 1, def: 0.
|
33 |
};
|
34 |
const b = bounds[key] || { min: 0, max: 100, def: 0 };
|
35 |
const v = window.KimiSecurityUtils
|
|
|
24 |
voiceRate: { min: 0.5, max: 2, def: 1.1 },
|
25 |
voicePitch: { min: 0, max: 2, def: 1.0 },
|
26 |
voiceVolume: { min: 0, max: 1, def: 0.8 },
|
27 |
+
llmTemperature: { min: 0, max: 1, def: 0.8 },
|
28 |
+
llmMaxTokens: { min: 1, max: 32000, def: 400 },
|
29 |
+
llmTopP: { min: 0, max: 1, def: 0.9 },
|
30 |
+
llmFrequencyPenalty: { min: 0, max: 2, def: 0.6 },
|
31 |
+
llmPresencePenalty: { min: 0, max: 2, def: 0.5 },
|
32 |
+
interfaceOpacity: { min: 0.1, max: 1, def: 0.8 }
|
33 |
};
|
34 |
const b = bounds[key] || { min: 0, max: 100, def: 0 };
|
35 |
const v = window.KimiSecurityUtils
|
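The updated bounds table keeps the kimi-utils.js defaults aligned with the new slider defaults (temperature 0.8, max tokens 400, frequency/presence penalties 0.6/0.5). Below is a simplified stand-in for the clamp-to-bounds behaviour those entries feed into; the real KimiValidationUtils.validateRange also routes the raw value through KimiSecurityUtils, which is omitted here:

// Simplified stand-in: clamp a raw slider value into the [min, max] range for its key,
// falling back to the key's default when the input is not a finite number.
const BOUNDS = {
    llmTemperature: { min: 0, max: 1, def: 0.8 },
    llmMaxTokens: { min: 1, max: 32000, def: 400 },
    llmFrequencyPenalty: { min: 0, max: 2, def: 0.6 },
    llmPresencePenalty: { min: 0, max: 2, def: 0.5 }
};

function validateRangeSketch(rawValue, key) {
    const b = BOUNDS[key] || { min: 0, max: 100, def: 0 };
    const n = Number(rawValue);
    const value = Number.isFinite(n) ? Math.min(b.max, Math.max(b.min, n)) : b.def;
    return { value };
}

console.log(validateRangeSketch("1.7", "llmTemperature").value); // 1 (clamped to max)
console.log(validateRangeSketch("abc", "llmMaxTokens").value); // 400 (default)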
kimi-js/kimi-voices.js
CHANGED
@@ -191,7 +191,7 @@ class KimiVoiceManager {
|
|
191 |
filteredVoices = this.availableVoices.filter(voice => voice.lang.toLowerCase().includes(this.selectedLanguage));
|
192 |
}
|
193 |
if (filteredVoices.length === 0) {
|
194 |
-
// As a last resort, use any available voice
|
195 |
filteredVoices = this.availableVoices;
|
196 |
}
|
197 |
|
@@ -214,22 +214,17 @@ class KimiVoiceManager {
|
|
214 |
}
|
215 |
}
|
216 |
|
217 |
-
if
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
filteredVoices[0] ||
|
227 |
-
this.availableVoices[0];
|
228 |
-
}
|
229 |
|
230 |
-
if
|
231 |
-
await this.db?.setPreference("selectedVoice", this.kimiEnglishVoice.name);
|
232 |
-
}
|
233 |
|
234 |
this.updateVoiceSelector();
|
235 |
this._initializingVoices = false;
|
@@ -276,12 +271,10 @@ class KimiVoiceManager {
|
|
276 |
async handleVoiceChange(e) {
|
277 |
if (e.target.value === "auto") {
|
278 |
await this.db?.setPreference("selectedVoice", "auto");
|
279 |
-
|
280 |
-
this.kimiEnglishVoice = null; // Reset to trigger auto-selection on next speak
|
281 |
} else {
|
282 |
this.kimiEnglishVoice = this.availableVoices.find(voice => voice.name === e.target.value);
|
283 |
await this.db?.setPreference("selectedVoice", e.target.value);
|
284 |
-
// Reduced logging to prevent noise
|
285 |
}
|
286 |
}
|
287 |
|
@@ -305,15 +298,7 @@ class KimiVoiceManager {
|
|
305 |
}
|
306 |
|
307 |
// Clean text for better speech synthesis
|
308 |
-
let processedText = text
|
309 |
-
.replace(/([\p{Emoji}\p{Extended_Pictographic}])/gu, " ")
|
310 |
-
.replace(/\.\.\./g, " pause ")
|
311 |
-
.replace(/\!+/g, " ! ")
|
312 |
-
.replace(/\?+/g, " ? ")
|
313 |
-
.replace(/\.{2,}/g, " pause ")
|
314 |
-
.replace(/[,;:]+/g, ", ")
|
315 |
-
.replace(/\s+/g, " ")
|
316 |
-
.trim();
|
317 |
|
318 |
// Detect emotional content for voice adjustments
|
319 |
let customRate = options.rate;
|
@@ -452,6 +437,67 @@ class KimiVoiceManager {
|
|
452 |
this.speechSynthesis.speak(utterance);
|
453 |
}
|
454 |
|
|
|
|
|
|
|
455 |
// Intelligently calculate synthesis duration
|
456 |
calculateSpeechDuration(text, rate = 0.9) {
|
457 |
const baseWordsPerMinute = 150;
|
|
|
191 |
filteredVoices = this.availableVoices.filter(voice => voice.lang.toLowerCase().includes(this.selectedLanguage));
|
192 |
}
|
193 |
if (filteredVoices.length === 0) {
|
194 |
+
// As a last resort, use any available voice
|
195 |
filteredVoices = this.availableVoices;
|
196 |
}
|
197 |
|
|
|
214 |
}
|
215 |
}
|
216 |
|
217 |
+
// Prefer female voices if available, otherwise fallback
|
218 |
+
const femaleVoice = filteredVoices.find(
|
219 |
+
voice =>
|
220 |
+
voice.name.toLowerCase().includes("female") ||
|
221 |
+
(voice.gender && voice.gender.toLowerCase() === "female") ||
|
222 |
+
voice.name.toLowerCase().includes("woman") ||
|
223 |
+
voice.name.toLowerCase().includes("girl")
|
224 |
+
);
|
225 |
+
this.kimiEnglishVoice = femaleVoice || filteredVoices[0] || this.availableVoices[0];
|
|
|
|
|
|
|
226 |
|
227 |
+
// Do not overwrite "auto" preference here; only update if user selects a specific voice
|
|
|
|
|
228 |
|
229 |
this.updateVoiceSelector();
|
230 |
this._initializingVoices = false;
|
|
|
271 |
async handleVoiceChange(e) {
|
272 |
if (e.target.value === "auto") {
|
273 |
await this.db?.setPreference("selectedVoice", "auto");
|
274 |
+
this.kimiEnglishVoice = null; // Trigger auto-selection next time
|
|
|
275 |
} else {
|
276 |
this.kimiEnglishVoice = this.availableVoices.find(voice => voice.name === e.target.value);
|
277 |
await this.db?.setPreference("selectedVoice", e.target.value);
|
|
|
278 |
}
|
279 |
}
|
280 |
|
|
|
298 |
}
|
299 |
|
300 |
// Clean text for better speech synthesis
|
301 |
+
let processedText = this._normalizeForSpeech(text);
|
|
|
|
|
|
302 |
|
303 |
// Detect emotional content for voice adjustments
|
304 |
let customRate = options.rate;
|
|
|
437 |
this.speechSynthesis.speak(utterance);
|
438 |
}
|
439 |
|
440 |
+
/**
|
441 |
+
* Normalize raw model text into something natural for browser speech synthesis.
|
442 |
+
* Goals:
|
443 |
+
* - Remove emojis / pictographs (engines try to read them literally)
|
444 |
+
* - Collapse excessive punctuation while preserving rhythm
|
445 |
+
* - Convert ellipses to a Unicode ellipsis (…)
|
446 |
+
* - Remove markdown / formatting artifacts (* _ ~ ` # [] <> etc.)
|
447 |
+
* - Remove stray markup like **bold**, inline code, URLs parentheses clutter
|
448 |
+
* - Keep meaningful punctuation (. , ! ? ; :)
|
449 |
+
* - Avoid inserting artificial words (e.g., "pause")
|
450 |
+
*/
|
451 |
+
_normalizeForSpeech(raw) {
|
452 |
+
if (!raw) return "";
|
453 |
+
let txt = raw;
|
454 |
+
// Remove URLs completely (they sound awkward) – keep none.
|
455 |
+
txt = txt.replace(/https?:\/\/\S+/gi, " ");
|
456 |
+
// Remove markdown code blocks and inline code markers
|
457 |
+
txt = txt.replace(/`{3}[\s\S]*?`{3}/g, " "); // fenced blocks
|
458 |
+
txt = txt.replace(/`([^`]+)`/g, "$1"); // inline code unwrap
|
459 |
+
// Remove emphasis markers (*, _, ~) while keeping inner text
|
460 |
+
txt = txt.replace(/\*{1,3}([^*]+)\*{1,3}/g, "$1");
|
461 |
+
txt = txt.replace(/_{1,3}([^_]+)_{1,3}/g, "$1");
|
462 |
+
txt = txt.replace(/~{1,2}([^~]+)~{1,2}/g, "$1");
|
463 |
+
// Strip remaining markdown heading symbols at line starts
|
464 |
+
txt = txt.replace(/^\s{0,3}#{1,6}\s+/gm, "");
|
465 |
+
// Remove HTML/XML tags
|
466 |
+
txt = txt.replace(/<[^>]+>/g, " ");
|
467 |
+
// Remove brackets content if it is link style [text](url)
|
468 |
+
txt = txt.replace(/\[([^\]]+)\]\([^)]*\)/g, "$1");
|
469 |
+
// Remove leftover standalone brackets
|
470 |
+
txt = txt.replace(/[\[\]<>]/g, " ");
|
471 |
+
// Remove emojis / pictographic chars
|
472 |
+
txt = txt.replace(/[\p{Emoji}\p{Extended_Pictographic}]/gu, " ");
|
473 |
+
// Normalize ellipses: sequences of 3+ dots -> single ellipsis surrounded by light spaces
|
474 |
+
txt = txt.replace(/\.{3,}/g, " … ");
|
475 |
+
// Replace double dots with single period + space
|
476 |
+
txt = txt.replace(/\.\./g, ". ");
|
477 |
+
// Collapse multiple exclamation/question marks to single (keeps expressiveness but avoids stutter)
|
478 |
+
txt = txt.replace(/!{2,}/g, "!");
|
479 |
+
txt = txt.replace(/\?{2,}/g, "?");
|
480 |
+
// Space after sentence punctuation if missing
|
481 |
+
txt = txt.replace(/([.!?])([^\s\d])/g, "$1 $2");
|
482 |
+
// Replace underscores or asterisks still present with spaces
|
483 |
+
txt = txt.replace(/[*_]{2,}/g, " ");
|
484 |
+
// Remove stray backticks
|
485 |
+
txt = txt.replace(/`+/g, " ");
|
486 |
+
// Collapse mixed punctuation like ?!?! to a single terminal symbol keeping first char
|
487 |
+
txt = txt.replace(/([!?]){2,}/g, "$1");
|
488 |
+
// Remove repeated commas / semicolons / colons
|
489 |
+
txt = txt.replace(/,{2,}/g, ",");
|
490 |
+
txt = txt.replace(/;{2,}/g, ";");
|
491 |
+
txt = txt.replace(/:{2,}/g, ":");
|
492 |
+
// Remove leading/trailing punctuation clusters
|
493 |
+
txt = txt.replace(/^[\s.,;:!?]+/, "").replace(/[\s.,;:!?]+$/, "");
|
494 |
+
// Collapse whitespace
|
495 |
+
txt = txt.replace(/\s+/g, " ");
|
496 |
+
// Final trim
|
497 |
+
txt = txt.trim();
|
498 |
+
return txt;
|
499 |
+
}
|
500 |
+
|
501 |
// Intelligently calculate synthesis duration
|
502 |
calculateSpeechDuration(text, rate = 0.9) {
|
503 |
const baseWordsPerMinute = 150;
|
kimi-locale/de.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "Was willst du?",
|
114 |
"response_cold_4": "Ich bin hier.",
|
115 |
"response_cold_5": "Wie kann ich dir helfen?",
|
116 |
-
"system_prompt": "System-Prompt",
|
117 |
"system_prompt_kimi": "Kimi System-Prompt",
|
118 |
"system_prompt_bella": "Bella System-Prompt",
|
119 |
"system_prompt_rosa": "Rosa System-Prompt",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "Anwesenheitsstrafe",
|
128 |
"db_size": "DB-Größe",
|
129 |
"storage_used": "Speicher verwendet",
|
130 |
-
"save-system-prompt": "System-Prompt Speichern",
|
131 |
-
"reset-system-prompt": "System-Prompt Zurücksetzen",
|
132 |
"saved": "Gespeichert!",
|
133 |
"saved_short": "Gespeichert",
|
134 |
"api_key_help_title": "Gespeichert = Ihr API-Schlüssel ist für diesen Anbieter gespeichert. Verwenden Sie ‘Test API Key’, um die Verbindung zu prüfen.",
|
@@ -207,7 +204,6 @@
|
|
207 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
208 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
209 |
"test_api_key": "API-Schlüssel testen",
|
210 |
-
"system_prompt_placeholder": "Fügen Sie hier Ihren System-Prompt hinzu...",
|
211 |
"theme_purple": "Mystic Purple (Standard)",
|
212 |
"theme_dark": "Dunkle Nacht",
|
213 |
"theme_blue": "Ozeanblau",
|
|
|
113 |
"response_cold_3": "Was willst du?",
|
114 |
"response_cold_4": "Ich bin hier.",
|
115 |
"response_cold_5": "Wie kann ich dir helfen?",
|
|
|
116 |
"system_prompt_kimi": "Kimi System-Prompt",
|
117 |
"system_prompt_bella": "Bella System-Prompt",
|
118 |
"system_prompt_rosa": "Rosa System-Prompt",
|
|
|
126 |
"presence_penalty": "Anwesenheitsstrafe",
|
127 |
"db_size": "DB-Größe",
|
128 |
"storage_used": "Speicher verwendet",
|
|
|
|
|
129 |
"saved": "Gespeichert!",
|
130 |
"saved_short": "Gespeichert",
|
131 |
"api_key_help_title": "Gespeichert = Ihr API-Schlüssel ist für diesen Anbieter gespeichert. Verwenden Sie ‘Test API Key’, um die Verbindung zu prüfen.",
|
|
|
204 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
205 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
206 |
"test_api_key": "API-Schlüssel testen",
|
|
|
207 |
"theme_purple": "Mystic Purple (Standard)",
|
208 |
"theme_dark": "Dunkle Nacht",
|
209 |
"theme_blue": "Ozeanblau",
|
kimi-locale/en.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "What do you want?",
|
114 |
"response_cold_4": "I am here.",
|
115 |
"response_cold_5": "How can I help you?",
|
116 |
-
"system_prompt": "System Prompt",
|
117 |
"system_prompt_kimi": "Kimi System Prompt",
|
118 |
"system_prompt_bella": "Bella System Prompt",
|
119 |
"system_prompt_rosa": "Rosa System Prompt",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "Presence Penalty",
|
128 |
"db_size": "DB Size",
|
129 |
"storage_used": "Storage used",
|
130 |
-
"save-system-prompt": "Save System Prompt",
|
131 |
-
"reset-system-prompt": "Reset System Prompt",
|
132 |
"saved": "Saved!",
|
133 |
"saved_short": "Saved",
|
134 |
"api_key_help_title": "Saved = your API key is stored for this provider. Use Test API Key to verify the connection.",
|
@@ -205,7 +202,6 @@
|
|
205 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
206 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
207 |
"test_api_key": "Test API Key",
|
208 |
-
"system_prompt_placeholder": "Add your custom system prompt here...",
|
209 |
"theme_purple": "Mystic Purple (Default)",
|
210 |
"theme_dark": "Dark Night",
|
211 |
"theme_blue": "Ocean Blue",
|
|
|
113 |
"response_cold_3": "What do you want?",
|
114 |
"response_cold_4": "I am here.",
|
115 |
"response_cold_5": "How can I help you?",
|
|
|
116 |
"system_prompt_kimi": "Kimi System Prompt",
|
117 |
"system_prompt_bella": "Bella System Prompt",
|
118 |
"system_prompt_rosa": "Rosa System Prompt",
|
|
|
126 |
"presence_penalty": "Presence Penalty",
|
127 |
"db_size": "DB Size",
|
128 |
"storage_used": "Storage used",
|
|
|
|
|
129 |
"saved": "Saved!",
|
130 |
"saved_short": "Saved",
|
131 |
"api_key_help_title": "Saved = your API key is stored for this provider. Use Test API Key to verify the connection.",
|
|
|
202 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
203 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
204 |
"test_api_key": "Test API Key",
|
|
|
205 |
"theme_purple": "Mystic Purple (Default)",
|
206 |
"theme_dark": "Dark Night",
|
207 |
"theme_blue": "Ocean Blue",
|
kimi-locale/es.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "¿Qué quieres?",
|
114 |
"response_cold_4": "Estoy aquí.",
|
115 |
"response_cold_5": "¿Cómo puedo ayudarte?",
|
116 |
-
"system_prompt": "Prompt del Sistema",
|
117 |
"system_prompt_kimi": "Prompt del Sistema de Kimi",
|
118 |
"system_prompt_bella": "Prompt del Sistema de Bella",
|
119 |
"system_prompt_rosa": "Prompt del Sistema de Rosa",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "Penalización de Presencia",
|
128 |
"db_size": "Tamaño de la BD",
|
129 |
"storage_used": "Almacenamiento usado",
|
130 |
-
"save-system-prompt": "Guardar Prompt del Sistema",
|
131 |
-
"reset-system-prompt": "Restaurar Prompt del Sistema",
|
132 |
"saved": "¡Guardado!",
|
133 |
"saved_short": "Guardado",
|
134 |
"api_key_help_title": "Guardado = tu clave API está almacenada para este proveedor. Usa 'Test API Key' para verificar la conexión.",
|
@@ -207,7 +204,6 @@
|
|
207 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
208 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
209 |
"test_api_key": "Probar Clave API",
|
210 |
-
"system_prompt_placeholder": "Agrega aquí tu prompt del sistema...",
|
211 |
"theme_purple": "Púrpura Místico (Predeterminado)",
|
212 |
"theme_dark": "Noche Oscura",
|
213 |
"theme_blue": "Azul Océano",
|
|
|
113 |
"response_cold_3": "¿Qué quieres?",
|
114 |
"response_cold_4": "Estoy aquí.",
|
115 |
"response_cold_5": "¿Cómo puedo ayudarte?",
|
|
|
116 |
"system_prompt_kimi": "Prompt del Sistema de Kimi",
|
117 |
"system_prompt_bella": "Prompt del Sistema de Bella",
|
118 |
"system_prompt_rosa": "Prompt del Sistema de Rosa",
|
|
|
126 |
"presence_penalty": "Penalización de Presencia",
|
127 |
"db_size": "Tamaño de la BD",
|
128 |
"storage_used": "Almacenamiento usado",
|
|
|
|
|
129 |
"saved": "¡Guardado!",
|
130 |
"saved_short": "Guardado",
|
131 |
"api_key_help_title": "Guardado = tu clave API está almacenada para este proveedor. Usa 'Test API Key' para verificar la conexión.",
|
|
|
204 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
205 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
206 |
"test_api_key": "Probar Clave API",
|
|
|
207 |
"theme_purple": "Púrpura Místico (Predeterminado)",
|
208 |
"theme_dark": "Noche Oscura",
|
209 |
"theme_blue": "Azul Océano",
|
kimi-locale/fr.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "Que veux-tu ?",
|
114 |
"response_cold_4": "Je suis là.",
|
115 |
"response_cold_5": "Comment puis-je t'aider ?",
|
116 |
-
"system_prompt": "Prompt système",
|
117 |
"system_prompt_kimi": "Prompt système de Kimi",
|
118 |
"system_prompt_bella": "Prompt système de Bella",
|
119 |
"system_prompt_rosa": "Prompt système de Rosa",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "Pénalité de présence",
|
128 |
"db_size": "Taille DB",
|
129 |
"storage_used": "Stockage utilisé",
|
130 |
-
"save-system-prompt": "Sauvegarder le prompt système",
|
131 |
-
"reset-system-prompt": "Réinitialiser le prompt système",
|
132 |
"saved": "Sauvegardé !",
|
133 |
"saved_short": "Sauvegardé",
|
134 |
"api_key_help_title": "Sauvegardé = votre clé API est stockée pour ce provider. Utilisez ‘Test API Key’ pour vérifier la connexion.",
|
@@ -205,7 +202,6 @@
|
|
205 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
206 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
207 |
"test_api_key": "Tester la Clé API",
|
208 |
-
"system_prompt_placeholder": "Ajoutez ici votre prompt système personnalisé...",
|
209 |
"theme_purple": "Mystic Purple (Défaut)",
|
210 |
"theme_dark": "Nuit Sombre",
|
211 |
"theme_blue": "Bleu Océan",
|
|
|
113 |
"response_cold_3": "Que veux-tu ?",
|
114 |
"response_cold_4": "Je suis là.",
|
115 |
"response_cold_5": "Comment puis-je t'aider ?",
|
|
|
116 |
"system_prompt_kimi": "Prompt système de Kimi",
|
117 |
"system_prompt_bella": "Prompt système de Bella",
|
118 |
"system_prompt_rosa": "Prompt système de Rosa",
|
|
|
126 |
"presence_penalty": "Pénalité de présence",
|
127 |
"db_size": "Taille DB",
|
128 |
"storage_used": "Stockage utilisé",
|
|
|
|
|
129 |
"saved": "Sauvegardé !",
|
130 |
"saved_short": "Sauvegardé",
|
131 |
"api_key_help_title": "Sauvegardé = votre clé API est stockée pour ce provider. Utilisez ‘Test API Key’ pour vérifier la connexion.",
|
|
|
202 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
203 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
204 |
"test_api_key": "Tester la Clé API",
|
|
|
205 |
"theme_purple": "Mystic Purple (Défaut)",
|
206 |
"theme_dark": "Nuit Sombre",
|
207 |
"theme_blue": "Bleu Océan",
|
kimi-locale/it.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "Cosa vuoi?",
|
114 |
"response_cold_4": "Sono qui.",
|
115 |
"response_cold_5": "Come posso aiutarti?",
|
116 |
-
"system_prompt": "Prompt di Sistema",
|
117 |
"system_prompt_kimi": "Prompt di Sistema di Kimi",
|
118 |
"system_prompt_bella": "Prompt di Sistema di Bella",
|
119 |
"system_prompt_rosa": "Prompt di Sistema di Rosa",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "Penalità di Presenza",
|
128 |
"db_size": "Dimensione DB",
|
129 |
"storage_used": "Spazio utilizzato",
|
130 |
-
"save-system-prompt": "Salva Prompt di Sistema",
|
131 |
-
"reset-system-prompt": "Ripristina Prompt di Sistema",
|
132 |
"saved": "Salvato!",
|
133 |
"saved_short": "Salvato",
|
134 |
"api_key_help_title": "Saved = your API key is stored for this provider. Use ‘Test API Key’ to verify the connection.",
|
@@ -207,7 +204,6 @@
|
|
207 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
208 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
209 |
"test_api_key": "Testa API Key",
|
210 |
-
"system_prompt_placeholder": "Aggiungi qui il tuo system prompt personalizzato...",
|
211 |
"theme_purple": "Mystic Purple (Predefinito)",
|
212 |
"theme_dark": "Notte Scura",
|
213 |
"theme_blue": "Blu Oceano",
|
|
|
113 |
"response_cold_3": "Cosa vuoi?",
|
114 |
"response_cold_4": "Sono qui.",
|
115 |
"response_cold_5": "Come posso aiutarti?",
|
|
|
116 |
"system_prompt_kimi": "Prompt di Sistema di Kimi",
|
117 |
"system_prompt_bella": "Prompt di Sistema di Bella",
|
118 |
"system_prompt_rosa": "Prompt di Sistema di Rosa",
|
|
|
126 |
"presence_penalty": "Penalità di Presenza",
|
127 |
"db_size": "Dimensione DB",
|
128 |
"storage_used": "Spazio utilizzato",
|
|
|
|
|
129 |
"saved": "Salvato!",
|
130 |
"saved_short": "Salvato",
|
131 |
"api_key_help_title": "Saved = your API key is stored for this provider. Use ‘Test API Key’ to verify the connection.",
|
|
|
204 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
205 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
206 |
"test_api_key": "Testa API Key",
|
|
|
207 |
"theme_purple": "Mystic Purple (Predefinito)",
|
208 |
"theme_dark": "Notte Scura",
|
209 |
"theme_blue": "Blu Oceano",
|
kimi-locale/ja.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "何が欲しいですか?",
|
114 |
"response_cold_4": "私はここにいます。",
|
115 |
"response_cold_5": "どのようにお手伝いできますか?",
|
116 |
-
"system_prompt": "システムプロンプト",
|
117 |
"system_prompt_kimi": "Kimiシステムプロンプト",
|
118 |
"system_prompt_bella": "Bellaシステムプロンプト",
|
119 |
"system_prompt_rosa": "Rosaシステムプロンプト",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "存在ペナルティ",
|
128 |
"db_size": "DBサイズ",
|
129 |
"storage_used": "使用ストレージ",
|
130 |
-
"save-system-prompt": "システムプロンプトを保存",
|
131 |
-
"reset-system-prompt": "システムプロンプトをリセット",
|
132 |
"saved": "保存されました!",
|
133 |
"saved_short": "保存",
|
134 |
"api_key_help_title": "保存 = このプロバイダー用のAPIキーが保存されました。接続確認には『Test API Key』を使用してください。",
|
@@ -207,7 +204,6 @@
|
|
207 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
208 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
209 |
"test_api_key": "APIキーをテスト",
|
210 |
-
"system_prompt_placeholder": "ここにカスタムシステムプロンプトを追加...",
|
211 |
"theme_purple": "ミスティックパープル (デフォルト)",
|
212 |
"theme_dark": "ダークナイト",
|
213 |
"theme_blue": "オーシャンブルー",
|
|
|
113 |
"response_cold_3": "何が欲しいですか?",
|
114 |
"response_cold_4": "私はここにいます。",
|
115 |
"response_cold_5": "どのようにお手伝いできますか?",
|
|
|
116 |
"system_prompt_kimi": "Kimiシステムプロンプト",
|
117 |
"system_prompt_bella": "Bellaシステムプロンプト",
|
118 |
"system_prompt_rosa": "Rosaシステムプロンプト",
|
|
|
126 |
"presence_penalty": "存在ペナルティ",
|
127 |
"db_size": "DBサイズ",
|
128 |
"storage_used": "使用ストレージ",
|
|
|
|
|
129 |
"saved": "保存されました!",
|
130 |
"saved_short": "保存",
|
131 |
"api_key_help_title": "保存 = このプロバイダー用のAPIキーが保存されました。接続確認には『Test API Key』を使用してください。",
|
|
|
204 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
205 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
206 |
"test_api_key": "APIキーをテスト",
|
|
|
207 |
"theme_purple": "ミスティックパープル (デフォルト)",
|
208 |
"theme_dark": "ダークナイト",
|
209 |
"theme_blue": "オーシャンブルー",
|
kimi-locale/zh.json
CHANGED
@@ -113,7 +113,6 @@
|
|
113 |
"response_cold_3": "你想要什么?",
|
114 |
"response_cold_4": "我在这里。",
|
115 |
"response_cold_5": "我如何能帮助你?",
|
116 |
-
"system_prompt": "系统提示",
|
117 |
"system_prompt_kimi": "Kimi系统提示",
|
118 |
"system_prompt_bella": "Bella系统提示",
|
119 |
"system_prompt_rosa": "Rosa系统提示",
|
@@ -127,8 +126,6 @@
|
|
127 |
"presence_penalty": "存在惩罚",
|
128 |
"db_size": "数据库大小",
|
129 |
"storage_used": "已使用存储",
|
130 |
-
"save-system-prompt": "保存系统提示",
|
131 |
-
"reset-system-prompt": "重置系统提示",
|
132 |
"saved": "已保存!",
|
133 |
"saved_short": "已保存",
|
134 |
"api_key_help_title": "已保存 = 您的API密钥已为该提供商保存。使用“Test API Key”验证连接。",
|
@@ -207,7 +204,6 @@
|
|
207 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
208 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
209 |
"test_api_key": "测试 API 密钥",
|
210 |
-
"system_prompt_placeholder": "在此添加自定义系统提示...",
|
211 |
"theme_purple": "神秘紫 (默认)",
|
212 |
"theme_dark": "暗夜",
|
213 |
"theme_blue": "海洋蓝",
|
|
|
113 |
"response_cold_3": "你想要什么?",
|
114 |
"response_cold_4": "我在这里。",
|
115 |
"response_cold_5": "我如何能帮助你?",
|
|
|
116 |
"system_prompt_kimi": "Kimi系统提示",
|
117 |
"system_prompt_bella": "Bella系统提示",
|
118 |
"system_prompt_rosa": "Rosa系统提示",
|
|
|
126 |
"presence_penalty": "存在惩罚",
|
127 |
"db_size": "数据库大小",
|
128 |
"storage_used": "已使用存储",
|
|
|
|
|
129 |
"saved": "已保存!",
|
130 |
"saved_short": "已保存",
|
131 |
"api_key_help_title": "已保存 = 您的API密钥已为该提供商保存。使用“Test API Key”验证连接。",
|
|
|
204 |
"llm_base_url_placeholder": "https://api.openai.com/v1/chat/completions",
|
205 |
"llm_model_id_placeholder": "gpt-4o-mini | llama-3.1-8b-instruct | ...",
|
206 |
"test_api_key": "测试 API 密钥",
|
|
|
207 |
"theme_purple": "神秘紫 (默认)",
|
208 |
"theme_dark": "暗夜",
|
209 |
"theme_blue": "海洋蓝",
|