import gradio as gr
import requests

# Fetch the list of available models (and their pricing metadata) from OpenRouter.
URL = "https://openrouter.ai/api/v1/models"

response = requests.get(URL)

print(response.json())

# Each entry under "data" describes one model; the fields used below are
# "name", "context_length", and "pricing" (per-token "prompt" and
# "completion" prices, returned as strings).
data = response.json()["data"]
all_models = [a["name"] for a in data]
name_to_dict = {a["name"]: a for a in data}


def get_cost(model_name, input_token, output_token):
    """Estimate the cost of a request for the selected model."""
    model_dict = name_to_dict[model_name]

    # Prices come back as strings, so convert them to floats.
    prompt_cost = float(model_dict["pricing"]["prompt"])
    completion_cost = float(model_dict["pricing"]["completion"])

    context_length = float(model_dict["context_length"])

    # Prompt tokens are capped at the model's context length.
    return prompt_cost * min(context_length, input_token) + completion_cost * output_token


# Quick sanity check with one model from the list.
get_cost('Google: Gemini Pro 1.0', 1000, 1000)
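
# A minimal sketch, not part of the original app: guard against a model name
# that is missing from the API response so the UI shows a readable message
# instead of raising a KeyError. The helper name `safe_get_cost` is
# hypothetical; to use it, pass fn=safe_get_cost to gr.Interface below.
def safe_get_cost(model_name, input_token, output_token):
    if model_name not in name_to_dict:
        return f"Unknown model: {model_name}"
    return get_cost(model_name, input_token, output_token)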

# Simple Gradio UI: pick a model, enter token counts, and get the estimated cost.
demo = gr.Interface(
    fn=get_cost,
    inputs=[
        gr.Dropdown(choices=all_models, label="Model"),
        gr.Number(label="Input tokens"),
        gr.Number(label="Output tokens"),
    ],
    outputs="text",
    title="LLM Cost Calculator",
    description="Calculate the cost of a prompt",
)

demo.launch()
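
# Optional: demo.launch(share=True) would also create a temporary public URL
# via Gradio's built-in sharing, which is handy for quick demos.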