"""Gradio front-end for a token-authenticated, streaming vision-chat API.

Flow: the user logs in (OAuth2 password grant) to obtain a bearer token,
then sends text plus optional images to a chat-completions endpoint and
streams the assistant's reply token-by-token into a Chatbot component.
"""

import base64
import json
import mimetypes

import gradio as gr
import requests

# API endpoints
LOGIN_URL = "http://164.52.195.95/login"
CHAT_URL = "http://164.52.195.95/v1/chat/completions"

# Module-level session state (single-user app; one token, one history).
access_token = None
chat_history = []

# Cap every HTTP call so a dead server cannot hang the UI forever.
REQUEST_TIMEOUT = 60


def login_user(username, password):
    """Authenticate against LOGIN_URL and cache the bearer token.

    Returns updates for (login_page, chat_page, login_status): on success
    the login column is hidden and the chat column shown; on failure both
    are left unchanged and the error is surfaced in the status Markdown.
    """
    global access_token, chat_history
    print("Attempting login...")
    try:
        response = requests.post(
            LOGIN_URL,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data={
                "grant_type": "password",
                "username": username,
                "password": password,
                "scope": "",
                "client_id": "string",
                "client_secret": "********",
            },
            timeout=REQUEST_TIMEOUT,
        )
        print("Login response status:", response.status_code)
        response.raise_for_status()
        access_token = response.json()["access_token"]
        chat_history = []  # fresh session per login
        return gr.update(visible=False), gr.update(visible=True), "✅ Login successful!"
    except Exception as e:  # network failure, 4xx/5xx, or malformed JSON
        print("Login failed:", e)
        return gr.update(), gr.update(), f"❌ Login failed: {e}"


def make_return(chatbot, msg="", image=None, alert_text=None):
    """Package the four outputs (chatbot, msg, image_preview, error_alert).

    The alert Markdown is shown only when alert_text is non-empty.
    """
    return chatbot, msg, image, gr.update(visible=bool(alert_text), value=alert_text or "")


def _encode_images(image_files):
    """Encode uploaded image files as OpenAI-style image_url content blocks.

    Returns (media_blocks, previews) where each entry is a base64 data URI.
    The mime type is guessed from the file extension (the picker allows
    .png/.jpg/.jpeg), falling back to image/jpeg. Raises OSError if a file
    cannot be read; the caller reports that to the user.
    """
    media_blocks, previews = [], []
    for path in image_files or []:
        with open(path, "rb") as fh:
            encoded = base64.b64encode(fh.read()).decode("utf-8")
        # Bug fix: the original hardcoded image/jpeg even for PNG uploads.
        mime = mimetypes.guess_type(path)[0] or "image/jpeg"
        data_uri = f"data:{mime};base64,{encoded}"
        media_blocks.append({"type": "image_url", "image_url": {"url": data_uri}})
        previews.append(data_uri)
    return media_blocks, previews


def _history_to_messages(chatbot):
    """Rebuild the API message list from the Chatbot's message history.

    Assumes the history alternates user/assistant entries (Gradio
    "messages" format); a trailing unanswered user turn gets an empty
    assistant placeholder. User content may be a plain string or an
    already-structured content list (text + image blocks).
    """
    messages = []
    for i in range(0, len(chatbot), 2):
        user_msg = chatbot[i]
        assistant_msg = (
            chatbot[i + 1]
            if i + 1 < len(chatbot)
            else {"role": "assistant", "content": ""}
        )
        user_content = user_msg["content"]
        if isinstance(user_content, str):
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": user_content}]}
            )
        elif isinstance(user_content, list):
            messages.append({"role": "user", "content": user_content})
        if isinstance(assistant_msg["content"], str):
            messages.append({"role": "assistant", "content": assistant_msg["content"]})
    return messages


def send_message(message_text, image_files, chatbot):
    """Stream a chat completion for message_text plus optional images.

    Generator: yields (chatbot, msg, image_preview, error_alert) updates so
    Gradio re-renders after every streamed token.

    Bug fix: the original used ``return make_return(...)`` for the
    login-check, image-error, HTTP-error, stream-error and final-response
    paths. Inside a generator, ``return value`` only populates
    ``StopIteration.value`` — Gradio never receives it — so none of those
    outputs were ever shown. Every terminal path now ``yield``s first.
    """
    global chat_history, access_token
    print("Sending message:", message_text)

    if access_token is None:
        yield make_return(chatbot + [{"role": "assistant", "content": "Please login first."}])
        return

    try:
        media_blocks, _previews = _encode_images(image_files)
    except Exception as e:
        print("Error encoding image:", e)
        yield make_return(
            chatbot
            + [
                {"role": "user", "content": message_text},
                {"role": "assistant", "content": f"Image processing failed: {e}"},
            ],
            "",
            None,
            f"❌ Image processing failed: {e}",
        )
        return

    # Prior turns, then the new user turn (text first, then any images).
    message_group = _history_to_messages(chatbot)
    new_msg = [{"type": "text", "text": message_text}]
    if media_blocks:
        new_msg.extend(media_blocks)
    message_group.append({"role": "user", "content": new_msg})

    payload = {
        "model": "./models/Llama-3.2-11B-Vision-Instruct",
        "messages": message_group,
        "temperature": 0.3,
        "max_tokens": 500,
        "stream": True,
    }
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
    }

    response_text = ""
    try:
        with requests.post(
            CHAT_URL, headers=headers, json=payload, stream=True, timeout=REQUEST_TIMEOUT
        ) as response:
            print("Streaming response...")
            if response.status_code != 200:
                try:
                    error_msg = response.json().get("message", "Unknown error")
                except Exception:  # error body was not JSON
                    error_msg = response.text
                yield make_return(
                    chatbot, "", None, f"❌ API Error {response.status_code}: {error_msg}"
                )
                return
            # Server-sent events: each payload line is "data: {json}".
            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                line = raw_line.decode("utf-8")
                print("Stream chunk:", line)
                if not line.startswith("data:"):
                    continue
                content = line[5:].strip()
                if content == "[DONE]":
                    break
                try:
                    delta = json.loads(content)["choices"][0]["delta"]
                    response_text += delta.get("content", "")
                    yield (
                        chatbot
                        + [
                            {"role": "user", "content": message_text},
                            {"role": "assistant", "content": response_text},
                        ],
                        "",
                        None,
                        gr.update(visible=False),
                    )
                except Exception as json_err:
                    # Tolerate malformed chunks; keep whatever streamed so far.
                    print("Error parsing stream chunk:", json_err)
                    continue
    except Exception as e:
        print("Error during streaming:", e)
        yield make_return(
            chatbot
            + [
                {"role": "user", "content": message_text},
                {"role": "assistant", "content": f"❌ Error: {e}"},
            ],
            "",
            None,
            f"❌ {e}",
        )
        return

    print("Final assistant response:", response_text)
    yield make_return(
        chatbot
        + [
            {"role": "user", "content": message_text},
            {"role": "assistant", "content": response_text},
        ]
    )


# === UI ===
with gr.Blocks(css="""
.message-input-box textarea { border-radius: 16px !important; padding: 12px !important; background-color: #2c2c2c; color: white; }
.message-input-box textarea::placeholder { color: #aaa; }
.toast-alert { background-color: #ff4d4f; color: white; padding: 10px 16px; border-radius: 8px; margin: 10px 0; font-weight: bold; }
""") as app:
    gr.Markdown("# 🤖 Secure AI Assistant Panel")

    with gr.Column(visible=True) as login_page:
        username = gr.Textbox(label="Username")
        password = gr.Textbox(label="Password", type="password")
        login_btn = gr.Button("Login")
        login_status = gr.Markdown(visible=True)

    with gr.Column(visible=False) as chat_page:
        chatbot = gr.Chatbot(label="Chat", show_label=False, height=420, type="messages")
        with gr.Row():
            msg = gr.Textbox(label="", placeholder="Ask anything", elem_classes=["message-input-box"])
            send_btn = gr.Button("Send")
        img = gr.File(
            label="Upload Images (optional)",
            type="filepath",
            interactive=True,
            file_types=[".png", ".jpg", ".jpeg"],
            file_count="multiple",
        )
        image_preview = gr.Image(label="Image Preview", visible=False)
        error_alert = gr.Markdown(visible=False, elem_classes=["toast-alert"])

    login_btn.click(
        fn=login_user,
        inputs=[username, password],
        outputs=[login_page, chat_page, login_status],
    )
    send_btn.click(
        fn=send_message,
        inputs=[msg, img, chatbot],
        outputs=[chatbot, msg, image_preview, error_alert],
    )

app.launch()