# OKC — OpenAI API key checker (Gradio app for Hugging Face Spaces).
# NOTE(review): removed "Spaces: / Sleeping / Sleeping" lines — Hugging Face UI
# text accidentally pasted into the source; it was not valid Python.
import gradio as gr
import requests
from requests.exceptions import HTTPError
# Base URL for all OpenAI REST calls.
BaseApi = "https://api.openai.com/v1"

# Documented requests-per-minute ceilings for paid keys. A key whose
# x-ratelimit-limit-requests header reports a lower value than this is
# classified as a trial key by check_key_status().
RateLimitPerModel = {
    "gpt-3.5-turbo": 3500,
    "gpt-4": 200,
    "gpt-4-32k": 1000,
}
def get_available_models(api_key):
    """Return the subset of chat models that *api_key* can access.

    Lists the account's models and keeps only the ones this tool knows
    how to probe. Returns an empty list on any failure (invalid key,
    network error, unexpected payload) — the caller treats [] as
    "could not retrieve models".
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    # Fix: the /engines endpoint is deprecated; /models is the supported
    # listing endpoint and returns the same {"data": [{"id": ...}, ...]} shape.
    url = f"{BaseApi}/models"
    wanted = {"gpt-4", "gpt-4-32k", "gpt-3.5-turbo"}
    try:
        # timeout added so a stalled connection cannot hang the app forever
        response = requests.get(url, headers=headers, timeout=30)
        response_data = response.json()
        return [model["id"] for model in response_data.get("data", []) if model["id"] in wanted]
    except Exception:
        # Deliberate best-effort swallow: any failure means "no models".
        return []
def check_key_status(api_key, model):
    """Probe *model* with a deliberately invalid completion request and
    classify *api_key* from the error type and headers the API returns.

    Returns a human-readable status string for every outcome (the
    original implicitly returned None for unhandled status codes).
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    url = f"{BaseApi}/chat/completions"
    # max_tokens=-1 is intentionally invalid: a working key answers with a
    # cheap 400 invalid_request_error instead of generating any tokens.
    data = {
        "model": model,
        "messages": [{"role": "user", "content": ""}],
        "max_tokens": -1
    }
    try:
        # timeout added so a stalled connection cannot hang the app forever
        response = requests.post(url, headers=headers, json=data, timeout=30)
        response_data = response.json()
        if response.status_code == 401:
            return "Error: Invalid API key"
        error_type = response_data.get("error", {}).get("type", "")
        if error_type == "insufficient_quota" and model in {"gpt-4", "gpt-4-32k"}:
            return f"Error: The key for {model} is out of quota, but has gpt4"
        elif error_type in ["insufficient_quota", "billing_not_active", "access_terminated"]:
            return f"Error: The key for {model} is either out of quota, inactive, or access is terminated."
        ratelimited = response.status_code == 429
        # 400 invalid_request_error (our bad max_tokens) or 429 both mean
        # the key authenticated and can reach this model.
        if (response.status_code == 400 and error_type == "invalid_request_error") or ratelimited:
            ratelimit = response.headers.get("x-ratelimit-limit-requests", "0")
            org = response.headers.get("openai-organization", "user-xyz")
            # Trial keys get a lower requests/min ceiling than paid keys.
            is_trial_key = "Trial Key" if int(ratelimit) < RateLimitPerModel.get(model, 0) else "Paid Key"
            return f"Key for {model} is working. Ratelimit: {ratelimit}, Organization: {org}, Key Type: {is_trial_key}"
        # Fix: previously fell through and returned None for any other
        # status code; surface the unexpected status instead.
        return f"Error: Unexpected response for {model} (HTTP {response.status_code})"
    except HTTPError as http_err:
        # NOTE(review): requests.post never raises HTTPError without
        # raise_for_status(); kept for backward compatibility.
        return f"HTTP error occurred: {http_err}"
    except Exception as e:
        # Catch-all boundary: every failure becomes a user-visible string.
        return f"Error occurred: {e}"
def check_models(api_key):
    """Check every model *api_key* can access; one status line per model.

    Returns an error string when the model list cannot be retrieved.
    """
    available_models = get_available_models(api_key)
    if not available_models:
        return "Error occurred: Unable to retrieve available models. Please check your API key."
    model_info = "\n".join(check_key_status(api_key, model) for model in available_models)
    return model_info
# Define Gradio interface with a button to trigger model checking
def trigger_model_check(api_key):
    """Gradio callback: run the full key/model check and return the report."""
    return check_models(api_key)
iface = gr.Interface(
    fn=trigger_model_check,
    # Fix: gr.inputs.Textbox / gr.outputs.Textbox were removed in Gradio 3+;
    # the top-level component classes work on both Gradio 3 and 4.
    inputs=gr.Textbox(placeholder="Enter your OpenAI API key", type="text"),
    outputs=gr.Textbox(),
    live=False,
    title="OKC",
    # Fix: allow_flagging expects "never"/"auto"/"manual", not a bool.
    allow_flagging="never",  # Disable flagging to prevent unnecessary reporting
)

iface.launch()