Spaces:
Sleeping
Sleeping
Commit
·
50389ad
1
Parent(s):
05d78c6
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import gradio as gr
|
3 |
+
from requests.exceptions import HTTPError
|
4 |
+
|
5 |
+
|
6 |
+
# Base URL shared by every OpenAI REST API call below.
BaseApi = "https://api.openai.com/v1"

# Rate limits for each model: the requests-per-minute cap a paid account is
# expected to have per model. check_key_status compares the key's reported
# x-ratelimit-limit-requests header against these values; a lower value is
# taken to mean the key is a trial key.
RateLimitPerModel = {
    "gpt-3.5-turbo": 3500,
    "gpt-4": 200,
    "gpt-4-32k": 1000
}
|
14 |
+
|
15 |
+
|
16 |
+
def get_available_models(api_key):
    """Return the subset of known chat models this API key can access.

    Lists the models visible to the key and filters them down to the ones
    this app knows how to probe (gpt-3.5-turbo, gpt-4, gpt-4-32k).

    Args:
        api_key: OpenAI secret key ("sk-...") sent as a Bearer token.

    Returns:
        A list of matching model ids; an empty list if the request fails,
        the body is not JSON, or the key cannot list models.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    # /v1/models replaces the deprecated /v1/engines endpoint; both return a
    # JSON object whose "data" field is a list of {"id": ...} entries.
    url = f"{BaseApi}/models"

    supported = {"gpt-4", "gpt-4-32k", "gpt-3.5-turbo"}
    try:
        # Timeout so a stalled connection cannot hang the Gradio worker.
        response = requests.get(url, headers=headers, timeout=15)
        response_data = response.json()
        return [model["id"]
                for model in response_data.get("data", [])
                if model["id"] in supported]
    except (requests.RequestException, ValueError):
        # Network/HTTP failure or a non-JSON body: treat as "no models".
        # (Narrowed from a bare `except Exception` so real bugs still surface.)
        return []
|
27 |
+
|
28 |
+
def check_key_status(api_key, model):
    """Probe one model with a deliberately invalid request and classify the key.

    Sends a chat completion with max_tokens=-1 so the API rejects the request
    before any tokens are spent; the status code and error type of the
    rejection reveal whether the key is invalid, out of quota, or working.

    Args:
        api_key: OpenAI secret key sent as a Bearer token.
        model: Model id to probe (e.g. "gpt-4").

    Returns:
        A human-readable, single-line status for this key/model pair.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    url = f"{BaseApi}/chat/completions"

    data = {
        "model": model,
        "messages": [{"role": "user", "content": ""}],
        # Intentionally invalid: forces an immediate invalid_request_error so
        # the probe never consumes billable tokens.
        "max_tokens": -1
    }

    try:
        # Timeout so a stalled connection cannot hang the Gradio worker.
        response = requests.post(url, headers=headers, json=data, timeout=15)
        response_data = response.json()

        if response.status_code == 401:
            return "Error: Invalid API key"

        error_type = response_data.get("error", {}).get("type", "")
        if error_type == "insufficient_quota" and model in {"gpt-4", "gpt-4-32k"}:
            return f"Error: The key for {model} is out of quota but has GPT4."
        elif error_type in ["insufficient_quota", "billing_not_active", "access_terminated"]:
            return f"Error: The key for {model} is either out of quota, inactive, or access is terminated."

        ratelimited = response.status_code == 429
        if (response.status_code == 400 and error_type == "invalid_request_error") or ratelimited:
            # A 400 invalid_request_error (caused by max_tokens=-1) or a 429
            # both prove the key authenticated and can reach this model.
            ratelimit = response.headers.get("x-ratelimit-limit-requests", "0")
            org = response.headers.get("openai-organization", "user-xyz")
            # Trial keys get a lower requests-per-minute cap than paid keys.
            is_trial_key = "Trial Key" if int(ratelimit) < RateLimitPerModel.get(model, 0) else "Paid key"
            return f"Key for {model} is working. Ratelimit: {ratelimit}, Organization: {org}, Key Type: {is_trial_key}"

        # Bug fix: the original fell through and implicitly returned None for
        # any response not matched above; report the unexpected status instead.
        return f"Unexpected response for {model}: HTTP {response.status_code} ({error_type or 'no error type'})"

    except HTTPError as http_err:
        # NOTE(review): requests only raises HTTPError via raise_for_status(),
        # which is never called here — kept for safety, but likely dead code.
        return f"HTTP error occurred: {http_err}"
    except Exception as e:
        # Top-level boundary for the UI: surface the failure as text rather
        # than crashing the Gradio callback.
        return f"Error occurred: {e}"
|
62 |
+
|
63 |
+
def check_model(api_key):
    """Check every chat model reachable with this key and report one line each.

    Args:
        api_key: OpenAI secret key entered by the user.

    Returns:
        A newline-separated status report (one line per model), or a single
        error message when no models could be listed for the key.
    """
    models = get_available_models(api_key)
    if not models:
        return "Error occurred: Unable to retrieve available models. Please check your API key."

    report_lines = []
    for model in models:
        report_lines.append(check_key_status(api_key, model))
    return "\n".join(report_lines)
|
70 |
+
|
71 |
+
# Define Gradio interface.
# Bug fix: gr.inputs.Textbox / gr.outputs.Textbox were deprecated in Gradio 2
# and removed in Gradio 3.x — the components are now top-level classes, so the
# original crashed at import time on any current Gradio release.
iface = gr.Interface(
    fn=check_model,
    inputs=gr.Textbox(placeholder="Enter your OpenAI API key", type="text"),
    outputs=gr.Textbox(),
    # NOTE(review): live=True re-runs check_model on every keystroke, firing
    # real API requests while the key is still being typed — consider removing
    # it so the check only runs on submit.
    live=True,
    title="OKC",
    description="Enter your OpenAI API key to check the available models.",
)

iface.launch()
|