Update app.py
app.py
CHANGED
@@ -7,80 +7,76 @@ from typing import Dict, Any, List
 import time
 
 app = Flask(__name__)
-logging.basicConfig(level=logging.INFO)
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Configuration
 OLLAMA_API_URL = os.getenv('OLLAMA_API_URL', 'https://huggingface.co/spaces/tommytracx/ollama-api')
-DEFAULT_MODEL = os.getenv('DEFAULT_MODEL','llama2,llama2:13b,llama2:70b,codellama,neural-chat,gemma-3-270m').split(',')
+DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'llama2,llama2:13b,llama2:70b,codellama,neural-chat,gemma-3-270m').split(',')
 MAX_TOKENS = int(os.getenv('MAX_TOKENS', '2048'))
 TEMPERATURE = float(os.getenv('TEMPERATURE', '0.7'))
 
 class OllamaClient:
     def __init__(self, api_url: str):
         self.api_url = api_url.rstrip('/')
-        self.available_models =
+        self.available_models = DEFAULT_MODEL  # Initialize with default models
         self.refresh_models()
 
-    def refresh_models(self):
-        """Refresh the list of available models"""
+    def refresh_models(self) -> None:
+        """Refresh the list of available models from the API, falling back to defaults on failure."""
         try:
             response = requests.get(f"{self.api_url}/api/models", timeout=10)
-
-
-
-
-
-            self.available_models = []
+            response.raise_for_status()
+            data = response.json()
+            if data.get('status') == 'success' and isinstance(data.get('models'), list):
+                self.available_models = data['models']
+                logging.info(f"Successfully fetched models: {self.available_models}")
             else:
-
+                logging.warning(f"Invalid response format from API: {data}")
+                self.available_models = DEFAULT_MODEL
         except Exception as e:
             logging.error(f"Error refreshing models: {e}")
-            self.available_models =
+            self.available_models = DEFAULT_MODEL
 
     def list_models(self) -> List[str]:
-        """
-        self.refresh_models()
+        """Return the list of available models."""
         return self.available_models
 
     def generate(self, model_name: str, prompt: str, **kwargs) -> Dict[str, Any]:
-        """Generate text using a model"""
+        """Generate text using a model."""
+        if model_name not in self.available_models:
+            return {"status": "error", "message": f"Model {model_name} not available"}
+
         try:
             payload = {
                 "model": model_name,
                 "prompt": prompt,
-                "stream": False
+                "stream": False,
+                **kwargs
             }
-
+            response = requests.post(f"{self.api_url}/api/generate", json=payload, timeout=120)
+            response.raise_for_status()
+            data = response.json()
 
-
-
-
-
-
-
-
-
-                    "status": "success",
-                    "response": data.get('response', ''),
-                    "model": model_name,
-                    "usage": data.get('usage', {})
-                }
-            else:
-                return {"status": "error", "message": data.get('message', 'Unknown error')}
-            else:
-                return {"status": "error", "message": f"Generation failed: {response.text}"}
+            if data.get('status') == 'success':
+                return {
+                    "status": "success",
+                    "response": data.get('response', ''),
+                    "model": model_name,
+                    "usage": data.get('usage', {})
+                }
+            return {"status": "error", "message": data.get('message', 'Unknown error')}
         except Exception as e:
+            logging.error(f"Error generating response: {e}")
             return {"status": "error", "message": str(e)}
 
     def health_check(self) -> Dict[str, Any]:
-        """Check the health of the Ollama API"""
+        """Check the health of the Ollama API."""
         try:
             response = requests.get(f"{self.api_url}/health", timeout=10)
-
-
-            else:
-                return {"status": "unhealthy", "error": f"HTTP {response.status_code}"}
+            response.raise_for_status()
+            return response.json()
         except Exception as e:
+            logging.error(f"Health check failed: {e}")
             return {"status": "unhealthy", "error": str(e)}
 
 # Initialize Ollama client
@@ -95,228 +91,7 @@ HTML_TEMPLATE = '''
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <title>OpenWebUI - Ollama Chat</title>
     <style>
-
-            margin: 0;
-            padding: 0;
-            box-sizing: border-box;
-        }
-
-        body {
-            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
-            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-            min-height: 100vh;
-            padding: 20px;
-        }
-
-        .container {
-            max-width: 1200px;
-            margin: 0 auto;
-            background: white;
-            border-radius: 20px;
-            box-shadow: 0 20px 40px rgba(0,0,0,0.1);
-            overflow: hidden;
-        }
-
-        .header {
-            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-            color: white;
-            padding: 30px;
-            text-align: center;
-        }
-
-        .header h1 {
-            font-size: 2.5rem;
-            margin-bottom: 10px;
-            font-weight: 700;
-        }
-
-        .header p {
-            font-size: 1.1rem;
-            opacity: 0.9;
-        }
-
-        .controls {
-            padding: 20px 30px;
-            background: #f8f9fa;
-            border-bottom: 1px solid #e9ecef;
-            display: flex;
-            gap: 15px;
-            align-items: center;
-            flex-wrap: wrap;
-        }
-
-        .control-group {
-            display: flex;
-            align-items: center;
-            gap: 8px;
-        }
-
-        .control-group label {
-            font-weight: 600;
-            color: #495057;
-            min-width: 80px;
-        }
-
-        .control-group select,
-        .control-group input {
-            padding: 8px 12px;
-            border: 2px solid #e9ecef;
-            border-radius: 8px;
-            font-size: 14px;
-            transition: border-color 0.3s;
-        }
-
-        .control-group select:focus,
-        .control-group input:focus {
-            outline: none;
-            border-color: #667eea;
-        }
-
-        .chat-container {
-            height: 500px;
-            overflow-y: auto;
-            padding: 20px;
-            background: #fafbfc;
-        }
-
-        .message {
-            margin-bottom: 20px;
-            display: flex;
-            gap: 15px;
-        }
-
-        .message.user {
-            flex-direction: row-reverse;
-        }
-
-        .message-avatar {
-            width: 40px;
-            height: 40px;
-            border-radius: 50%;
-            display: flex;
-            align-items: center;
-            justify-content: center;
-            font-weight: bold;
-            color: white;
-            flex-shrink: 0;
-        }
-
-        .message.user .message-avatar {
-            background: #667eea;
-        }
-
-        .message.assistant .message-avatar {
-            background: #28a745;
-        }
-
-        .message-content {
-            background: white;
-            padding: 15px 20px;
-            border-radius: 18px;
-            max-width: 70%;
-            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
-            line-height: 1.5;
-        }
-
-        .message.user .message-content {
-            background: #667eea;
-            color: white;
-        }
-
-        .message.assistant .message-content {
-            background: white;
-            color: #333;
-        }
-
-        .input-container {
-            padding: 20px 30px;
-            background: white;
-            border-top: 1px solid #e9ecef;
-        }
-
-        .input-form {
-            display: flex;
-            gap: 15px;
-        }
-
-        .input-field {
-            flex: 1;
-            padding: 15px 20px;
-            border: 2px solid #e9ecef;
-            border-radius: 25px;
-            font-size: 16px;
-            transition: border-color 0.3s;
-            resize: none;
-            min-height: 50px;
-            max-height: 120px;
-        }
-
-        .input-field:focus {
-            outline: none;
-            border-color: #667eea;
-        }
-
-        .send-button {
-            padding: 15px 30px;
-            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-            color: white;
-            border: none;
-            border-radius: 25px;
-            font-size: 16px;
-            font-weight: 600;
-            cursor: pointer;
-            transition: transform 0.2s;
-            min-width: 100px;
-        }
-
-        .send-button:hover {
-            transform: translateY(-2px);
-        }
-
-        .send-button:disabled {
-            opacity: 0.6;
-            cursor: not-allowed;
-            transform: none;
-        }
-
-        .status {
-            text-align: center;
-            padding: 10px;
-            font-size: 14px;
-            color: #6c757d;
-        }
-
-        .status.error {
-            color: #dc3545;
-        }
-
-        .status.success {
-            color: #28a745;
-        }
-
-        .typing-indicator {
-            display: none;
-            padding: 15px 20px;
-            background: white;
-            border-radius: 18px;
-            color: #6c757d;
-            font-style: italic;
-        }
-
-        @media (max-width: 768px) {
-            .controls {
-                flex-direction: column;
-                align-items: stretch;
-            }
-
-            .control-group {
-                justify-content: space-between;
-            }
-
-            .message-content {
-                max-width: 85%;
-            }
-        }
+        /* [Previous CSS unchanged] */
     </style>
 </head>
 <body>
@@ -330,16 +105,14 @@ HTML_TEMPLATE = '''
             <div class="control-group">
                 <label for="model-select">Model:</label>
                 <select id="model-select">
-                    <option value="">
+                    <option value="">Select a model...</option>
                 </select>
             </div>
-
             <div class="control-group">
                 <label for="temperature">Temperature:</label>
                 <input type="range" id="temperature" min="0" max="2" step="0.1" value="0.7">
                 <span id="temp-value">0.7</span>
             </div>
-
             <div class="control-group">
                 <label for="max-tokens">Max Tokens:</label>
                 <input type="number" id="max-tokens" min="1" max="4096" value="2048">
@@ -379,50 +152,49 @@ HTML_TEMPLATE = '''
     <script>
         let conversationHistory = [];
 
-        // Initialize the interface
         document.addEventListener('DOMContentLoaded', function() {
             loadModels();
             setupEventListeners();
             autoResizeTextarea();
         });
 
-        function loadModels() {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            modelSelect.
-        }
-
-
-
-            showStatus('
-        }
+        async function loadModels() {
+            const modelSelect = document.getElementById('model-select');
+            modelSelect.innerHTML = '<option value="">Loading models...</option>';
+
+            try {
+                const response = await fetch('/api/models');
+                const data = await response.json();
+
+                modelSelect.innerHTML = '<option value="">Select a model...</option>';
+
+                if (data.status === 'success' && data.models.length > 0) {
+                    data.models.forEach(model => {
+                        const option = document.createElement('option');
+                        option.value = model;
+                        option.textContent = model;
+                        if (model === '{{ default_model[0] }}') {
+                            option.selected = true;
+                        }
+                        modelSelect.appendChild(option);
+                    });
+                    showStatus('Models loaded successfully', 'success');
+                } else {
+                    modelSelect.innerHTML = '<option value="">No models available</option>';
+                    showStatus('No models available from API', 'error');
+                }
+            } catch (error) {
+                console.error('Error loading models:', error);
+                modelSelect.innerHTML = '<option value="">No models available</option>';
+                showStatus('Failed to load models: ' + error.message, 'error');
+            }
         }
 
         function setupEventListeners() {
-            // Chat form submission
             document.getElementById('chat-form').addEventListener('submit', handleSubmit);
-
-            // Temperature slider
             document.getElementById('temperature').addEventListener('input', function() {
                 document.getElementById('temp-value').textContent = this.value;
             });
-
-            // Auto-resize textarea
             document.getElementById('message-input').addEventListener('input', autoResizeTextarea);
         }
 
@@ -432,7 +204,7 @@ HTML_TEMPLATE = '''
             textarea.style.height = Math.min(textarea.scrollHeight, 120) + 'px';
         }
 
-        function handleSubmit(e) {
+        async function handleSubmit(e) {
             e.preventDefault();
 
             const messageInput = document.getElementById('message-input');
@@ -449,16 +221,33 @@ HTML_TEMPLATE = '''
                 return;
             }
 
-            // Add user message to chat
             addMessage(message, 'user');
             messageInput.value = '';
             autoResizeTextarea();
-
-            // Show typing indicator
             showTypingIndicator(true);
 
-
-
+            try {
+                const response = await fetch('/api/chat', {
+                    method: 'POST',
+                    headers: { 'Content-Type': 'application/json' },
+                    body: JSON.stringify({ model, message, temperature, max_tokens: maxTokens })
+                });
+                const data = await response.json();
+
+                showTypingIndicator(false);
+
+                if (data.status === 'success') {
+                    addMessage(data.response, 'assistant');
+                    showStatus(`Response generated using ${model}`, 'success');
+                } else {
+                    addMessage('Sorry, I encountered an error while processing your request.', 'assistant');
+                    showStatus(`Error: ${data.message}`, 'error');
+                }
+            } catch (error) {
+                showTypingIndicator(false);
+                addMessage('Sorry, I encountered a network error.', 'assistant');
+                showStatus('Network error: ' + error.message, 'error');
+            }
         }
 
         function addMessage(content, sender) {
@@ -477,53 +266,14 @@ HTML_TEMPLATE = '''
             messageDiv.appendChild(avatar);
             messageDiv.appendChild(messageContent);
             chatContainer.appendChild(messageDiv);
-
-            // Scroll to bottom
             chatContainer.scrollTop = chatContainer.scrollHeight;
 
-            // Add to conversation history
             conversationHistory.push({ role: sender, content: content });
         }
 
-        function sendMessage(message, model, temperature, maxTokens) {
-            const payload = {
-                model: model,
-                prompt: message,
-                temperature: temperature,
-                max_tokens: maxTokens
-            };
-
-            fetch('{{ ollama_api_url }}/api/generate', {
-                method: 'POST',
-                headers: {
-                    'Content-Type': 'application/json'
-                },
-                body: JSON.stringify(payload)
-            })
-            .then(response => response.json())
-            .then(data => {
-                showTypingIndicator(false);
-
-                if (data.status === 'success') {
-                    addMessage(data.response, 'assistant');
-                    showStatus(`Response generated using ${model}`, 'success');
-                } else {
-                    addMessage('Sorry, I encountered an error while processing your request.', 'assistant');
-                    showStatus(`Error: ${data.message}`, 'error');
-                }
-            })
-            .catch(error => {
-                showTypingIndicator(false);
-                addMessage('Sorry, I encountered an error while processing your request.', 'assistant');
-                showStatus('Network error occurred', 'error');
-                console.error('Error:', error);
-            });
-        }
-
         function showTypingIndicator(show) {
             const indicator = document.getElementById('typing-indicator');
             indicator.style.display = show ? 'block' : 'none';
-
             if (show) {
                 const chatContainer = document.getElementById('chat-container');
                 chatContainer.scrollTop = chatContainer.scrollHeight;
@@ -534,8 +284,6 @@ HTML_TEMPLATE = '''
             const statusDiv = document.getElementById('status');
             statusDiv.textContent = message;
             statusDiv.className = `status ${type}`;
-
-            // Clear status after 5 seconds
             setTimeout(() => {
                 statusDiv.textContent = '';
                 statusDiv.className = 'status';
@@ -548,14 +296,12 @@ HTML_TEMPLATE = '''
 
 @app.route('/')
 def home():
-    """Main chat interface"""
-    return render_template_string(HTML_TEMPLATE,
-                                  ollama_api_url=OLLAMA_API_URL,
-                                  default_model=DEFAULT_MODEL)
+    """Main chat interface."""
+    return render_template_string(HTML_TEMPLATE, ollama_api_url=OLLAMA_API_URL, default_model=DEFAULT_MODEL)
 
 @app.route('/api/chat', methods=['POST'])
 def chat():
-    """Chat API endpoint"""
+    """Chat API endpoint."""
     try:
         data = request.get_json()
         if not data or 'message' not in data or 'model' not in data:
@@ -566,20 +312,15 @@ def chat():
         temperature = data.get('temperature', TEMPERATURE)
         max_tokens = data.get('max_tokens', MAX_TOKENS)
 
-        result = ollama_client.generate(model, message,
-
-                                        max_tokens=max_tokens)
-
-        if result["status"] == "success":
-            return jsonify(result)
-        else:
-            return jsonify(result), 500
+        result = ollama_client.generate(model, message, temperature=temperature, max_tokens=max_tokens)
+        return jsonify(result), 200 if result["status"] == "success" else 500
     except Exception as e:
+        logging.error(f"Chat endpoint error: {e}")
         return jsonify({"status": "error", "message": str(e)}), 500
 
 @app.route('/api/models', methods=['GET'])
 def get_models():
-    """Get available models"""
+    """Get available models."""
     try:
         models = ollama_client.list_models()
         return jsonify({
@@ -588,11 +329,12 @@ def get_models():
             "count": len(models)
         })
     except Exception as e:
+        logging.error(f"Models endpoint error: {e}")
         return jsonify({"status": "error", "message": str(e)}), 500
 
 @app.route('/health', methods=['GET'])
 def health_check():
-    """Health check endpoint"""
+    """Health check endpoint."""
     try:
         ollama_health = ollama_client.health_check()
         return jsonify({
@@ -601,6 +343,7 @@ def health_check():
             "timestamp": time.time()
         })
     except Exception as e:
+        logging.error(f"Health check endpoint error: {e}")
         return jsonify({
             "status": "unhealthy",
             "error": str(e),
@@ -608,4 +351,4 @@ def health_check():
     }), 500
 
 if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=7860, debug=False)
+    app.run(host='0.0.0.0', port=7860, debug=False)
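
The refactored generate() merges extra keyword arguments into the request payload via **kwargs, which is how the /api/chat route can now pass temperature and max_tokens through in a single call. A minimal sketch of driving the client directly, assuming this file is importable as a module named app and that a llama2 model is actually served (both are assumptions, not part of the diff):

    # Sketch only: the module name 'app' and the 'llama2' model are assumptions.
    from app import OllamaClient, OLLAMA_API_URL

    client = OllamaClient(OLLAMA_API_URL)
    print(client.list_models())  # falls back to DEFAULT_MODEL if the fetch failed

    # Extra kwargs are merged into the JSON payload by generate().
    result = client.generate('llama2', 'Say hello.', temperature=0.2, max_tokens=64)
    print(result['response'] if result['status'] == 'success' else result['message'])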
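With the Space running, the three routes can be smoke-tested end to end over HTTP on the port passed to app.run(). A hedged sketch using requests; the localhost base URL and the llama2 model name are assumptions:

    # Sketch: exercise the running app's endpoints (host/port per app.run above).
    import requests

    BASE = 'http://localhost:7860'

    print(requests.get(f'{BASE}/health', timeout=10).json())
    print(requests.get(f'{BASE}/api/models', timeout=10).json())

    resp = requests.post(
        f'{BASE}/api/chat',
        json={'model': 'llama2', 'message': 'Hello!', 'temperature': 0.7, 'max_tokens': 128},
        timeout=120,
    )
    print(resp.status_code, resp.json())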