# Lake AI — Gradio chat front-end backed by the Blackbox chat API.
import gradio as gr
import requests
import json
import time
# --- API Configuration ---
# Endpoint the chat completions are POSTed to (see call_blackbox_api).
BLACKBOX_URL = "https://api.blackbox.ai/api/chat"
# --- Model Configuration ---
# Maps the user-facing model names to the backend model identifiers
# sent in the API payload.
api_models = {
"Lake 1 Mini": "mistralai/Mistral-Small-24B-Instruct-2501",
"Lake 1 Base": "databricks/dbrx-instruct",
"Lake 1 Chat": "deepseek-ai/deepseek-llm-67b-chat",
"Lake 1 Advanced": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
}
# Model-specific system prompts
# Persona text prepended to every conversation for the chosen model
# (combined with mode/access info in get_system_message).
MODEL_PROMPTS = {
"Lake 1 Mini": "You are a general-purpose AI assistant focused on providing concise and practical answers.",
"Lake 1 Base": "You are a technical expert AI specializing in detailed explanations and step-by-step solutions.",
"Lake 1 Chat": "You are a friendly conversational AI that prioritizes natural dialogue and approachable responses.",
"Lake 1 Advanced": "You are an advanced AI capable of expert-level analysis and critical thinking."
}
# --- Rate Limits ---
# Requests per minute allowed per access tier (enforced in check_rate_limit).
STANDARD_RPM = 4
PLUS_RPM = 8
PRO_RPM = 16
# Max tokens per request per access tier (used in generate_response).
STANDARD_TPM = 1200
PLUS_TPM = 2400
PRO_TPM = 4800
# --- Magic Word Secrets ---
# Keys entered on the Settings tab to unlock "plus" / "pro" access tiers.
MAGIC_WORD_SECRET_1 = "SourSesameManager"
MAGIC_WORD_SECRET_2 = "BeanedSesameHockey"
def get_system_message(model: str, preset: str, access: str) -> str:
    """Build the combined system prompt for a conversation.

    Args:
        model: Display name of the model ("Lake 1 Mini", ...). Unknown
            names fall back to a generic assistant prompt.
        preset: Performance mode ("Fast", "Normal" or "Quality").
        access: Access tier ("standard", "plus" or "pro").

    Returns:
        Multi-line system message combining the model persona, the
        preset description and the access-level description.
    """
    base_prompt = MODEL_PROMPTS.get(model, "You are a helpful AI assistant. Your Creator is BI Corp")
    preset_modes = {
        "Fast": "Prioritize speed over detail",
        "Normal": "Balance speed and detail",
        "Quality": "Prioritize detailed, comprehensive responses"
    }
    access_levels = {
        "standard": f"Standard access: Limited to {STANDARD_RPM} requests/min",
        "plus": f"Plus access: Up to {PLUS_RPM} requests/min",
        "pro": f"Pro access: Maximum {PRO_RPM} requests/min"
    }
    # Robustness fix: the original indexed preset_modes[preset] and
    # access_levels[access] directly, raising KeyError on any unexpected
    # value; unknown inputs now fall back to "Normal" / "standard".
    mode_line = preset_modes.get(preset, preset_modes["Normal"])
    access_line = access_levels.get(access, access_levels["standard"])
    return (
        f"{base_prompt}\n"
        f"Mode: {mode_line}\n"
        f"Access: {access_line}\n"
        "Respond appropriately to the user's query:"
    )
def check_rate_limit(settings_state: dict) -> bool:
    """Check and update the per-user request counter.

    Args:
        settings_state: Mutable per-session dict holding at least
            "access"; "request_count" and "last_reset" are created or
            updated in place.

    Returns:
        True if the request is allowed (counter incremented), False if
        the tier's requests-per-minute limit has been reached.
    """
    current_time = time.time()
    # Reset the counter once the 60-second window has elapsed.
    if current_time - settings_state.get("last_reset", 0) > 60:
        settings_state["request_count"] = 0
        settings_state["last_reset"] = current_time
    # Robustness fix: the original read settings_state["access"] directly
    # (KeyError if missing); unknown/missing tiers now fall back to standard.
    rpm_limits = {"pro": PRO_RPM, "plus": PLUS_RPM}
    max_rpm = rpm_limits.get(settings_state.get("access", "standard"), STANDARD_RPM)
    count = settings_state.get("request_count", 0)
    if count >= max_rpm:
        return False
    settings_state["request_count"] = count + 1
    return True
def call_blackbox_api(messages: list, model: str, max_new_tokens: int) -> str:
    """POST a chat completion request to the Blackbox API.

    Args:
        messages: Chat messages as a list of {"role", "content"} dicts.
        model: Backend model identifier (a value from api_models).
        max_new_tokens: Token budget for the reply.

    Returns:
        The assistant's reply text on success, otherwise a human-readable
        error string (this function never raises).
    """
    headers = {'Content-Type': 'application/json'}
    payload = {
        "messages": messages,
        "model": model,
        # NOTE(review): preserved from the original — max_tokens is sent as a
        # string; confirm against the API docs whether an int is expected.
        "max_tokens": str(max_new_tokens)
    }
    try:
        # Fix: the original had no timeout, so a stalled connection would
        # hang the handler forever; network errors are now reported inline.
        response = requests.post(BLACKBOX_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException as e:
        return f"Error contacting API: {e}"
    if response.status_code == 200 and "application/json" in response.headers.get('Content-Type', ''):
        try:
            data = response.json()
            if 'choices' in data and data['choices']:
                return data['choices'][0]['message']['content']
            else:
                return "Error: Unexpected response format."
        except Exception as e:
            return f"Error parsing JSON: {e}"
    else:
        # Non-200 or non-JSON replies are surfaced verbatim to the user.
        return f"{response.text}"
def generate_response(message: str, model_name: str, preset: str, access: str) -> str:
    """Resolve the model and token budget for *access*, then return the API reply.

    Args:
        message: The user's chat message.
        model_name: Display name of the model; unknown names use "Lake 1 Mini".
        preset: Performance mode passed through to the system prompt.
        access: Access tier ("standard", "plus" or "pro").

    Returns:
        The assistant's reply (or an error string from call_blackbox_api).
    """
    # Token budget per tier; anything unrecognized gets the standard budget.
    tpm_by_tier = {"pro": PRO_TPM, "plus": PLUS_TPM}
    token_budget = tpm_by_tier.get(access, STANDARD_TPM)
    backend_model = api_models.get(model_name, api_models["Lake 1 Mini"])
    system_msg = get_system_message(model_name, preset, access)
    conversation = [
        {"role": "system", "content": system_msg},
        {"role": "user", "content": message},
    ]
    return call_blackbox_api(conversation, backend_model, token_budget)
def chat_handler(message, history, settings_state):
    """Gradio event handler: append the user turn and the assistant reply.

    Args:
        message: Text from the message textbox.
        history: Current chatbot history (list of role/content dicts).
        settings_state: Per-session settings dict (model, preset, access,
            rate-limit counters); mutated by check_rate_limit.

    Returns:
        A new history list with the user message and either the model's
        reply or a rate-limit warning appended.
    """
    user_turn = {"role": "user", "content": message}
    if not check_rate_limit(settings_state):
        access = settings_state.get("access", "standard")
        # Bug fix: the original message interpolated the tier *name*
        # ("...allows standard RPM."); show the numeric limit instead.
        max_rpm = {"pro": PRO_RPM, "plus": PLUS_RPM}.get(access, STANDARD_RPM)
        warning = {
            "role": "assistant",
            "content": f"Rate limit exceeded! The {access} plan allows {max_rpm} requests/min."
        }
        return history + [user_turn, warning]
    response = generate_response(
        message,
        settings_state["model"],
        settings_state["preset"],
        settings_state["access"]
    )
    return history + [user_turn, {"role": "assistant", "content": response}]
def update_settings(model, preset, magic_word):
    """Apply the Settings tab: resolve the access tier and refresh the UI.

    Args:
        model: Currently selected model display name.
        preset: Selected performance mode.
        magic_word: Premium key; matching a secret unlocks "plus" or "pro".

    Returns:
        Tuple of (new session state dict, status markdown string,
        gr.update for the model dropdown with the allowed choices).
    """
    if magic_word == MAGIC_WORD_SECRET_2:
        access = "pro"
    elif magic_word == MAGIC_WORD_SECRET_1:
        access = "plus"
    else:
        access = "standard"
    models = ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"]
    if access in ("pro", "plus"):
        models.append("Lake 1 Advanced")
    # Bug fix: the original always reset the dropdown to models[0], discarding
    # a still-valid selection, and could store a model in state (e.g.
    # "Lake 1 Advanced" after a downgrade) that the dropdown no longer offered.
    selected = model if model in models else models[0]
    new_state = {
        "model": selected,
        "preset": preset,
        "access": access,
        "request_count": 0,
        "last_reset": time.time()
    }
    return (
        new_state,
        f"**Settings:** Model: {selected} | Preset: {preset} | Access: {access.title()}",
        gr.update(choices=models, value=selected)
    )
def create_interface():
    """Build and return the Gradio Blocks app (Chat + Settings tabs).

    Wires chat_handler to both the Send button and textbox submit, and
    update_settings to the Apply Settings button. Per-session state holds
    the model/preset/access selection and rate-limit counters.
    """
    # Custom CSS: styled donate/subscribe buttons, a rate-limit warning
    # class, and a typing-indicator animation (classes referenced below).
    css = """
.donate-btn, .subscribe-btn {
background: linear-gradient(45deg, #4CAF50, #45a049);
color: white;
border: none;
padding: 8px 16px;
border-radius: 4px;
cursor: pointer;
transition: all 0.3s;
}
.donate-btn:hover, .subscribe-btn:hover {
transform: scale(1.05);
box-shadow: 0 4px 8px rgba(0,0,0,0.2);
}
.rate-limit {
color: #ff4444;
font-weight: bold;
margin: 10px 0;
}
@keyframes typing {
0% { opacity: 0.5; }
50% { opacity: 1; }
100% { opacity: 0.5; }
}
.typing-indicator {
animation: typing 1.5s infinite;
font-size: 0.9em;
color: #666;
}
"""
    with gr.Blocks(title="Lake AI", css=css, theme=gr.themes.Soft()) as app:
        # Per-session settings; mutated by chat_handler (rate limiting)
        # and replaced wholesale by update_settings.
        state = gr.State({
            "model": "Lake 1 Mini",
            "preset": "Normal",
            "access": "standard",
            "request_count": 0,
            "last_reset": time.time()
        })
        with gr.Tab("Chat"):
            gr.Markdown("# π Lake AI Assistant")
            # type="messages" -> history is a list of role/content dicts,
            # matching what chat_handler builds and returns.
            chatbot = gr.Chatbot(height=400, label="Conversation", type="messages")
            msg = gr.Textbox(label="Your Message", placeholder="Type here...")
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                send_btn.click(chat_handler, [msg, chatbot, state], chatbot)
            with gr.Row():
                # Pure client-side buttons: open external pages via js, no
                # Python callback (fn/inputs/outputs are None).
                gr.Button("β Donate", elem_classes="donate-btn").click(
                    None, None, None, js="window.open('https://buymeacoffee.com/bronio_int')"
                )
                gr.Button("π Subscribe", elem_classes="subscribe-btn").click(
                    None, None, None, js="window.open('https://patreon.com/YourPageHere')"
                )
            # Pressing Enter in the textbox behaves like the Send button.
            msg.submit(chat_handler, [msg, chatbot, state], chatbot)
        with gr.Tab("Settings"):
            with gr.Row():
                with gr.Column():
                    model = gr.Dropdown(
                        ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"],
                        label="AI Model",
                        value="Lake 1 Mini"
                    )
                    preset = gr.Dropdown(
                        ["Fast", "Normal", "Quality"],
                        label="Performance Mode",
                        value="Normal"
                    )
                    # Entering a magic-word secret here upgrades the tier.
                    key = gr.Textbox(label="Premium Key", type="password")
            status = gr.Markdown()
            # update_settings also rewrites the model dropdown choices
            # (adds "Lake 1 Advanced" for plus/pro).
            gr.Button("Apply Settings").click(
                update_settings, [model, preset, key], [state, status, model]
            )
    return app
if __name__ == "__main__":
    # Fix: removed trailing "|" junk (page-scrape residue) that made the
    # original line a SyntaxError. Launch the Gradio app when run directly.
    create_interface().launch()