"""
Enhanced Modern UI for GPT-OSS-120B Chat Interface
"""
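
# Third-party dependencies, inferred from the imports below: PyQt5, markdown,
# and mlx-lm (which provides `load` and `generate`). Install them with, e.g.:
#   pip install PyQt5 markdown mlx-lm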
import sys
import time
import threading
import markdown
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout,
                             QHBoxLayout, QTextEdit, QLineEdit, QPushButton,
                             QLabel, QScrollArea, QFrame, QGroupBox, QSpinBox,
                             QSizePolicy, QProgressBar, QSplitter, QToolButton,
                             QMenu, QAction, QFileDialog, QMessageBox)
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QTimer, QSize, QEvent
from PyQt5.QtGui import (QFont, QTextCursor, QPalette, QColor, QIcon,
                         QTextCharFormat, QSyntaxHighlighter, QTextDocument)
from mlx_lm import load, generate
import logging
import re
import json
from datetime import datetime
from typing import List, Dict

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class ModelLoaderThread(QThread):
    """Thread for loading the model to prevent UI freezing"""
    # The loaded model and tokenizer are handed back to the UI thread via this
    # signal so they do not have to be re-loaded on the main thread.
    model_loaded = pyqtSignal(object, object)
    model_error = pyqtSignal(str)
    progress_update = pyqtSignal(str)

    def __init__(self):
        super().__init__()

    def run(self):
        try:
            self.progress_update.emit("Downloading model files...")
            logger.info("🚀 Loading GPT-OSS-120B...")
            model, tokenizer = load("mlx-community/gpt-oss-120b-MXFP4-Q4")
            logger.info("✅ Model loaded successfully!")
            self.progress_update.emit("Model loaded successfully!")
            self.model_loaded.emit(model, tokenizer)
        except Exception as e:
            logger.error(f"Failed to load model: {e}")
            self.model_error.emit(str(e))


class GenerationThread(QThread):
    """Thread for generating responses to prevent UI freezing"""
    response_ready = pyqtSignal(str, float)
    error_occurred = pyqtSignal(str)
    progress_update = pyqtSignal(str)

    def __init__(self, model, tokenizer, prompt, max_tokens):
        super().__init__()
        self.model = model
        self.tokenizer = tokenizer
        self.prompt = prompt
        self.max_tokens = max_tokens

    def run(self):
        try:
            start_time = time.time()

            self.progress_update.emit("Formatting prompt...")
            messages = [{"role": "user", "content": self.prompt}]
            formatted_prompt = self.tokenizer.apply_chat_template(
                messages, add_generation_prompt=True
            )

            self.progress_update.emit("Generating response...")
            response = generate(
                self.model,
                self.tokenizer,
                prompt=formatted_prompt,
                max_tokens=self.max_tokens,
                verbose=False
            )

            self.progress_update.emit("Processing response...")
            final_response = self.extract_final_response(response)
            generation_time = time.time() - start_time

            self.response_ready.emit(final_response, generation_time)

        except Exception as e:
            self.error_occurred.emit(str(e))

    def extract_final_response(self, response: str) -> str:
        """Extract the final assistant response from the chat template"""
        if "<|start|>assistant" in response:
            parts = response.split("<|start|>assistant")
            if len(parts) > 1:
                # Keep only the text after the last assistant marker
                final_part = parts[-1]

                # Strip the channel/message/end markers emitted by the chat template
                final_part = re.sub(r'<\|channel\|>[^<]+', '', final_part)
                final_part = final_part.replace('<|message|>', '')
                final_part = final_part.replace('<|end|>', '')

                # Remove any remaining tag-like tokens
                final_part = re.sub(r'<[^>]+>', '', final_part)
                final_part = final_part.strip()

                if final_part:
                    return final_part

        # Fallback: strip all special tokens from the raw response
        cleaned = re.sub(r'<\|[^>]+\|>', '', response)
        cleaned = re.sub(r'<[^>]+>', '', cleaned)
        return cleaned.strip()
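
# Illustrative input/output for extract_final_response (the exact special tokens
# depend on the model's chat template; this example only assumes the markers the
# parser above looks for):
#   raw:       "...<|start|>assistant<|channel|>final<|message|>Hello there!<|end|>"
#   extracted: "Hello there!"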


class CodeHighlighter(QSyntaxHighlighter):
    """Basic syntax highlighter for code blocks"""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.highlighting_rules = []

        keyword_format = QTextCharFormat()
        keyword_format.setForeground(QColor("#569CD6"))
        keyword_format.setFontWeight(QFont.Bold)
        keywords = ["def", "class", "return", "import", "from", "as", "if",
                    "else", "elif", "for", "while", "try", "except", "finally"]
        for word in keywords:
            pattern = r'\b' + word + r'\b'
            self.highlighting_rules.append((re.compile(pattern), keyword_format))

        string_format = QTextCharFormat()
        string_format.setForeground(QColor("#CE9178"))
        self.highlighting_rules.append((re.compile(r'\".*\"'), string_format))
        self.highlighting_rules.append((re.compile(r'\'.*\''), string_format))

        comment_format = QTextCharFormat()
        comment_format.setForeground(QColor("#6A9955"))
        self.highlighting_rules.append((re.compile(r'#.*'), comment_format))

    def highlightBlock(self, text):
        for pattern, fmt in self.highlighting_rules:
            for match in pattern.finditer(text):
                start, end = match.span()
                self.setFormat(start, end - start, fmt)


class ChatMessageWidget(QWidget):
    """Custom widget for displaying chat messages"""
    def __init__(self, is_user, message, timestamp=None, generation_time=None):
        super().__init__()
        self.is_user = is_user

        layout = QVBoxLayout()
        layout.setContentsMargins(15, 8, 15, 8)

        # Header row: sender, timestamp, and (for bot replies) generation time
        header_layout = QHBoxLayout()

        sender_icon = QLabel("👤" if is_user else "🤖")
        sender_label = QLabel("You" if is_user else "GPT-OSS-120B")
        sender_label.setStyleSheet("font-weight: bold; color: #2E86AB;" if is_user else "font-weight: bold; color: #A23B72;")

        time_text = timestamp if timestamp else datetime.now().strftime("%H:%M:%S")
        time_label = QLabel(time_text)
        time_label.setStyleSheet("color: #777; font-size: 11px;")

        header_layout.addWidget(sender_icon)
        header_layout.addWidget(sender_label)
        header_layout.addStretch()
        header_layout.addWidget(time_label)

        if generation_time and not is_user:
            speed_label = QLabel(f"{generation_time:.1f}s")
            speed_label.setStyleSheet("color: #777; font-size: 11px;")
            header_layout.addWidget(speed_label)

        layout.addLayout(header_layout)

        # Message body rendered as HTML
        message_display = QTextEdit()
        message_display.setReadOnly(True)

        formatted_message = self.format_message(message)
        message_display.setHtml(formatted_message)

        message_display.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        message_display.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        message_display.setStyleSheet("""
            QTextEdit {
                background-color: %s;
                border: 1px solid %s;
                border-radius: 12px;
                padding: 12px;
                margin: 2px;
                font-size: 14px;
            }
        """ % ("#E8F4F8" if is_user else "#F8F0F5", "#B8D8E8" if is_user else "#E8C6DE"))

        message_display.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
        message_display.setMinimumHeight(50)
        message_display.setMaximumHeight(600)

        # Attach a syntax highlighter to bot messages that look like code
        if not is_user and self.contains_code(message):
            self.highlighter = CodeHighlighter(message_display.document())

        layout.addWidget(message_display)
        self.setLayout(layout)

    def format_message(self, message):
        """Format message with basic HTML styling"""
        html = markdown.markdown(message)

        styled_html = f"""
        <div style="font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
                    line-height: 1.4; color: #333;">
            {html}
        </div>
        """
        return styled_html

    def contains_code(self, message):
        """Check if message contains code-like content"""
        code_indicators = ["def ", "class ", "import ", "function ", "var ", "const ", "=", "()", "{}", "[]"]
        return any(indicator in message for indicator in code_indicators)


class GPTOSSChatUI(QMainWindow):
    def __init__(self):
        super().__init__()
        self.model = None
        self.tokenizer = None
        self.conversation_history = []
        self.max_tokens = 2048
        self.generation_thread = None
        self.model_loader_thread = None

        self.init_ui()
        self.load_model_in_background()
    def init_ui(self):
        """Initialize the user interface"""
        self.setWindowTitle("GPT-OSS-120B Chat")
        self.setGeometry(100, 100, 1400, 900)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)

        main_layout = QHBoxLayout(central_widget)
        main_layout.setContentsMargins(15, 15, 15, 15)
        main_layout.setSpacing(15)

        left_panel = QFrame()
        left_panel.setMinimumWidth(280)
        left_panel.setMaximumWidth(350)
        left_panel.setFrameShape(QFrame.StyledPanel)
        left_panel_layout = QVBoxLayout(left_panel)
        left_panel_layout.setContentsMargins(12, 12, 12, 12)

        title_label = QLabel("GPT-OSS-120B Chat")
        title_label.setStyleSheet("font-size: 18px; font-weight: bold; color: #2E86AB; margin-bottom: 15px;")
        title_label.setAlignment(Qt.AlignCenter)
        left_panel_layout.addWidget(title_label)

        model_info_group = QGroupBox("🤖 Model Information")
        model_info_group.setStyleSheet("QGroupBox { font-weight: bold; }")
        model_info_layout = QVBoxLayout()

        model_details = [
            ("GPT-OSS-120B", "font-weight: bold; font-size: 14px; color: #333;"),
            ("120B parameters, 4-bit quantized", "color: #666; font-size: 12px;"),
            ("Apple M3 Ultra • 512GB RAM", "color: #666; font-size: 12px;"),
            ("Performance: ~95 tokens/second", "color: #4CAF50; font-size: 12px; font-weight: bold;")
        ]

        for text, style in model_details:
            label = QLabel(text)
            label.setStyleSheet(style)
            label.setWordWrap(True)
            model_info_layout.addWidget(label)

        model_info_group.setLayout(model_info_layout)
        left_panel_layout.addWidget(model_info_group)

        settings_group = QGroupBox("⚙️ Generation Settings")
        settings_group.setStyleSheet("QGroupBox { font-weight: bold; }")
        settings_layout = QVBoxLayout()

        tokens_layout = QHBoxLayout()
        tokens_label = QLabel("Max Tokens:")
        tokens_label.setStyleSheet("font-weight: bold;")
        self.tokens_spinner = QSpinBox()
        self.tokens_spinner.setRange(128, 4096)
        self.tokens_spinner.setValue(2048)
        self.tokens_spinner.valueChanged.connect(self.update_max_tokens)
        self.tokens_spinner.setStyleSheet("padding: 6px; border-radius: 4px;")
        tokens_layout.addWidget(tokens_label)
        tokens_layout.addWidget(self.tokens_spinner)
        settings_layout.addLayout(tokens_layout)

        settings_group.setLayout(settings_layout)
        left_panel_layout.addWidget(settings_group)

        conv_group = QGroupBox("💬 Conversation")
        conv_group.setStyleSheet("QGroupBox { font-weight: bold; }")
        conv_layout = QVBoxLayout()

        clear_btn = QPushButton("🗑️ Clear Conversation")
        clear_btn.clicked.connect(self.clear_conversation)
        clear_btn.setStyleSheet("text-align: left; padding: 8px;")
        conv_layout.addWidget(clear_btn)

        export_btn = QPushButton("💾 Export Conversation")
        export_btn.clicked.connect(self.export_conversation)
        export_btn.setStyleSheet("text-align: left; padding: 8px;")
        conv_layout.addWidget(export_btn)

        conv_group.setLayout(conv_layout)
        left_panel_layout.addWidget(conv_group)

        left_panel_layout.addStretch()

        self.status_indicator = QLabel("🟡 Loading model...")
        self.status_indicator.setStyleSheet("color: #666; font-size: 11px; margin-top: 10px;")
        left_panel_layout.addWidget(self.status_indicator)

        right_panel = QWidget()
        right_panel_layout = QVBoxLayout(right_panel)
        right_panel_layout.setContentsMargins(0, 0, 0, 0)

        self.chat_scroll = QScrollArea()
        self.chat_scroll.setWidgetResizable(True)
        self.chat_scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.chat_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.chat_scroll.setStyleSheet("background-color: #FAFAFA; border: none;")

        self.chat_container = QWidget()
        self.chat_layout = QVBoxLayout(self.chat_container)
        self.chat_layout.setAlignment(Qt.AlignTop)
        self.chat_layout.setSpacing(10)
        self.chat_layout.setContentsMargins(10, 10, 10, 10)

        self.chat_scroll.setWidget(self.chat_container)
        right_panel_layout.addWidget(self.chat_scroll)

        input_frame = QFrame()
        input_frame.setStyleSheet("background-color: white; border-top: 1px solid #EEE;")
        input_layout = QVBoxLayout(input_frame)
        input_layout.setContentsMargins(15, 15, 15, 15)

        input_top_layout = QHBoxLayout()
        self.message_input = QTextEdit()
        self.message_input.setPlaceholderText("Type your message here... (Shift+Enter for new line)")
        self.message_input.setMaximumHeight(100)
        self.message_input.setStyleSheet("""
            QTextEdit {
                padding: 12px;
                border: 2px solid #DDD;
                border-radius: 8px;
                font-size: 14px;
            }
            QTextEdit:focus {
                border-color: #2E86AB;
            }
        """)
        self.message_input.textChanged.connect(self.update_char_count)
        # Intercept Enter/Shift+Enter while the input box has focus (see eventFilter)
        self.message_input.installEventFilter(self)
        input_top_layout.addWidget(self.message_input)

        self.send_btn = QPushButton("Send")
        self.send_btn.setFixedSize(80, 50)
        self.send_btn.clicked.connect(self.send_message)
        self.send_btn.setStyleSheet("""
            QPushButton {
                background-color: #2E86AB;
                color: white;
                border: none;
                border-radius: 8px;
                font-weight: bold;
            }
            QPushButton:hover {
                background-color: #1F5E7A;
            }
            QPushButton:disabled {
                background-color: #CCCCCC;
            }
        """)
        input_top_layout.addWidget(self.send_btn)

        input_layout.addLayout(input_top_layout)

        bottom_layout = QHBoxLayout()
        self.char_count = QLabel("0 characters")
        self.char_count.setStyleSheet("color: #777; font-size: 11px;")
        bottom_layout.addWidget(self.char_count)

        bottom_layout.addStretch()

        clear_input_btn = QPushButton("Clear Input")
        clear_input_btn.setStyleSheet("font-size: 11px; padding: 4px 8px;")
        clear_input_btn.clicked.connect(self.clear_input)
        bottom_layout.addWidget(clear_input_btn)

        input_layout.addLayout(bottom_layout)
        right_panel_layout.addWidget(input_frame)

        main_layout.addWidget(left_panel)
        main_layout.addWidget(right_panel)

        self.statusBar().showMessage("Ready")

        self.apply_styles()
    def apply_styles(self):
        """Apply modern styling to the UI"""
        self.setStyleSheet("""
            QMainWindow {
                background-color: #F5F5F7;
            }
            QGroupBox {
                font-weight: bold;
                border: 1px solid #E0E0E0;
                border-radius: 8px;
                margin-top: 10px;
                padding-top: 20px;
                background-color: white;
            }
            QGroupBox::title {
                subcontrol-origin: margin;
                left: 10px;
                padding: 0 8px 0 8px;
                color: #2E86AB;
            }
            QPushButton {
                background-color: #2E86AB;
                color: white;
                border: none;
                padding: 8px 16px;
                border-radius: 6px;
                font-weight: bold;
            }
            QPushButton:hover {
                background-color: #1F5E7A;
            }
            QPushButton:disabled {
                background-color: #CCCCCC;
            }
            QScrollArea {
                border: none;
                background-color: #FAFAFA;
            }
            QSpinBox {
                padding: 6px;
                border: 1px solid #DDD;
                border-radius: 4px;
                background-color: white;
            }
            QFrame {
                background-color: white;
                border-radius: 8px;
            }
        """)
    def update_char_count(self):
        """Update character count label"""
        text = self.message_input.toPlainText()
        self.char_count.setText(f"{len(text)} characters")

    def clear_input(self):
        """Clear the input field"""
        self.message_input.clear()
    def load_model_in_background(self):
        """Load the model in a separate thread to prevent UI freezing"""
        self.statusBar().showMessage("Loading model...")
        self.status_indicator.setText("🟡 Loading model...")
        self.send_btn.setEnabled(False)
        self.message_input.setEnabled(False)
        self.tokens_spinner.setEnabled(False)

        self.model_loader_thread = ModelLoaderThread()
        self.model_loader_thread.model_loaded.connect(self.model_loaded)
        self.model_loader_thread.model_error.connect(self.model_error)
        self.model_loader_thread.progress_update.connect(self.update_progress)
        self.model_loader_thread.start()

    def update_progress(self, message):
        """Update progress message"""
        self.status_indicator.setText(f"🟡 {message}")

    def model_loaded(self, model, tokenizer):
        """Called with the loaded model and tokenizer once the loader thread finishes"""
        try:
            self.model, self.tokenizer = model, tokenizer
            self.statusBar().showMessage("Model loaded and ready!")
            self.status_indicator.setText("🟢 Model loaded and ready!")
            self.send_btn.setEnabled(True)
            self.message_input.setEnabled(True)
            self.tokens_spinner.setEnabled(True)

            welcome_msg = """Hello! I'm GPT-OSS-120B, running locally on your M3 Ultra.

I'm a 120 billion parameter open-source language model, and I'm here to assist you with:
- Answering questions
- Generating creative content
- Explaining complex concepts
- Writing and analyzing code
- And much more!

How can I help you today?"""
            self.add_message(False, welcome_msg, 0.0)

            QTimer.singleShot(100, self.scroll_to_bottom)
        except Exception as e:
            self.model_error(str(e))

    def model_error(self, error_msg):
        """Called when model loading fails"""
        self.statusBar().showMessage(f"Error loading model: {error_msg}")
        self.status_indicator.setText(f"🔴 Error: {error_msg}")
        error_widget = ChatMessageWidget(False, f"Error loading model: {error_msg}")
        self.chat_layout.addWidget(error_widget)
        self.send_btn.setEnabled(False)
        self.message_input.setEnabled(False)
    def send_message(self):
        """Send the current message"""
        message = self.message_input.toPlainText().strip()
        if not message or not self.model:
            return

        self.add_message(True, message)
        self.message_input.clear()

        self.send_btn.setEnabled(False)
        self.message_input.setEnabled(False)
        self.tokens_spinner.setEnabled(False)
        self.statusBar().showMessage("Generating response...")
        self.status_indicator.setText("🟡 Generating response...")

        self.generation_thread = GenerationThread(
            self.model, self.tokenizer, message, self.max_tokens
        )
        self.generation_thread.response_ready.connect(self.handle_response)
        self.generation_thread.error_occurred.connect(self.handle_error)
        self.generation_thread.progress_update.connect(self.update_progress)
        self.generation_thread.start()

    def handle_response(self, response, generation_time):
        """Handle the generated response"""
        self.add_message(False, response, generation_time)

        self.send_btn.setEnabled(True)
        self.message_input.setEnabled(True)
        self.tokens_spinner.setEnabled(True)
        self.statusBar().showMessage("Ready")
        self.status_indicator.setText("🟢 Ready")

        self.scroll_to_bottom()

    def handle_error(self, error_msg):
        """Handle generation errors"""
        self.add_message(False, f"Error: {error_msg}", 0.0)

        self.send_btn.setEnabled(True)
        self.message_input.setEnabled(True)
        self.tokens_spinner.setEnabled(True)
        self.statusBar().showMessage("Error occurred")
        self.status_indicator.setText("🔴 Error occurred")

        self.scroll_to_bottom()

    def add_message(self, is_user, message, generation_time=0.0):
        """Add a message to the chat history"""
        self.conversation_history.append({
            "is_user": is_user,
            "message": message,
            "timestamp": datetime.now().strftime("%H:%M:%S"),
            "generation_time": generation_time
        })

        message_widget = ChatMessageWidget(is_user, message, datetime.now().strftime("%H:%M:%S"), generation_time)
        self.chat_layout.addWidget(message_widget)
    def clear_conversation(self):
        """Clear the conversation history"""
        self.conversation_history = []

        for i in reversed(range(self.chat_layout.count())):
            widget = self.chat_layout.itemAt(i).widget()
            if widget:
                widget.setParent(None)

        welcome_msg = "Hello! I'm GPT-OSS-120B. How can I assist you today?"
        self.add_message(False, welcome_msg, 0.0)

        self.scroll_to_bottom()

    def export_conversation(self):
        """Export the conversation to a file"""
        try:
            options = QFileDialog.Options()
            file_path, _ = QFileDialog.getSaveFileName(
                self, "Save Conversation", "conversation.json", "JSON Files (*.json)", options=options
            )

            if file_path:
                if not file_path.endswith('.json'):
                    file_path += '.json'

                with open(file_path, 'w', encoding='utf-8') as f:
                    json.dump(self.conversation_history, f, indent=2, ensure_ascii=False)

                QMessageBox.information(self, "Success", f"Conversation exported to {file_path}")
        except Exception as e:
            QMessageBox.critical(self, "Error", f"Failed to export conversation: {str(e)}")
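    # Shape of the exported JSON (one object per turn, exactly as built in add_message;
    # the timestamp value below is illustrative):
    #   [{"is_user": true, "message": "...", "timestamp": "14:02:11",
    #     "generation_time": 0.0}, ...]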

    def update_max_tokens(self, value):
        """Update the maximum tokens for generation"""
        self.max_tokens = value

    def scroll_to_bottom(self):
        """Scroll the chat area to the bottom"""
        scrollbar = self.chat_scroll.verticalScrollBar()
        scrollbar.setValue(scrollbar.maximum())
    def eventFilter(self, obj, event):
        """Send on Enter, insert a newline on Shift+Enter, while the input box has focus"""
        if obj is self.message_input and event.type() == QEvent.KeyPress:
            if event.key() in (Qt.Key_Return, Qt.Key_Enter):
                if event.modifiers() & Qt.ShiftModifier:
                    # Let the QTextEdit insert the newline itself
                    return False
                self.send_message()
                return True
        return super().eventFilter(obj, event)


def main():
    app = QApplication(sys.argv)

    app.setStyle('Fusion')
    font = QFont("SF Pro Text", 12)
    app.setFont(font)

    chat_ui = GPTOSSChatUI()
    chat_ui.show()

    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
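
# To launch the UI (assuming the dependencies listed at the top are installed and
# enough memory is available for the 120B weights), run this script directly:
#   python gpt_oss_chat_ui.py   # filename is illustrative; use this file's actual name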