artificialguybr committed
Commit f326ee0 · verified · 1 Parent(s): 73cbb55

Create app.py

Files changed (1)
  1. app.py +196 -0
app.py ADDED
@@ -0,0 +1,196 @@
import gradio as gr
import requests
import json
import base64
from PIL import Image
import io
import time


def encode_image(image):
    # Accept either a Gradio file dict ({'path': ...}) or a plain file path string.
    if isinstance(image, dict) and 'path' in image:
        image_path = image['path']
    elif isinstance(image, str):
        image_path = image
    else:
        raise ValueError("Unsupported image format")

    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')


def bot_streaming(message, history, api_key, model, temperature, max_tokens, top_p, top_k,
                  frequency_penalty, presence_penalty, repetition_penalty, stop, min_p, top_a,
                  seed, logit_bias, logprobs, top_logprobs, response_format, tools, tool_choice):
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    messages = []
    images = []  # collected alongside the history; not sent to the API separately

    # Rebuild the OpenRouter message list from the Gradio chat history.
    for msg in history:
        if isinstance(msg[0], tuple):
            # Image turn: send the text together with the image as a base64 data URL.
            image, text = msg[0]
            base64_image = encode_image(image)
            messages.append({
                "role": "user",
                "content": [
                    {"type": "text", "text": text},
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
                ]
            })
            messages.append({"role": "assistant", "content": msg[1]})
            images.append(Image.open(image['path'] if isinstance(image, dict) else image).convert("RGB"))
        else:
            messages.append({"role": "user", "content": msg[0]})
            messages.append({"role": "assistant", "content": msg[1]})

    # Current turn: with multimodal=True the message arrives as {"text": ..., "files": [...]}.
    if isinstance(message, dict) and "files" in message and message["files"]:
        image = message["files"][0]
        base64_image = encode_image(image)
        content = [
            {"type": "text", "text": message["text"]},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
        ]
        images.append(Image.open(image['path'] if isinstance(image, dict) else image).convert("RGB"))
    else:
        content = message["text"] if isinstance(message, dict) else message

    messages.append({"role": "user", "content": content})

    data = {
        "model": model,
        "messages": messages,
        "stream": True,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "top_k": top_k,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty,
        "repetition_penalty": repetition_penalty,
        "stop": stop if stop else None,
        "min_p": min_p,
        "top_a": top_a,
        "seed": seed,
        "logit_bias": logit_bias,
        "logprobs": logprobs,
        "top_logprobs": top_logprobs,
        "response_format": response_format,
        "tools": tools,
        "tool_choice": tool_choice
    }

    # The "(JSON)" textboxes arrive as strings; parse them when filled in.
    for key in ("logit_bias", "response_format", "tools"):
        value = data.get(key)
        if isinstance(value, str) and value.strip():
            try:
                data[key] = json.loads(value)
            except json.JSONDecodeError:
                data[key] = None

    # Drop unset optional parameters so empty UI fields are not sent to the API.
    data = {k: v for k, v in data.items() if v is not None and v != ""}

    response = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers=headers,
        json=data,
        stream=True
    )

    if response.status_code != 200:
        yield f"Error {response.status_code}: {response.text}"
        return

    # OpenRouter streams Server-Sent Events: each line looks like "data: {json}" and the
    # stream ends with "data: [DONE]". Accumulate content deltas and yield the running text.
    buffer = ""
    for chunk in response.iter_lines():
        if chunk:
            chunk = chunk.decode('utf-8')
            if chunk.startswith("data: "):
                chunk = chunk[6:]
                if chunk.strip() == "[DONE]":
                    break
                try:
                    chunk_data = json.loads(chunk)
                    if 'choices' in chunk_data and len(chunk_data['choices']) > 0:
                        delta = chunk_data['choices'][0].get('delta', {})
                        if 'content' in delta:
                            buffer += delta['content']
                            yield buffer
                            time.sleep(0.01)
                except json.JSONDecodeError:
                    continue


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🤖 OpenRouter API Multimodal Chat

    Chat with various AI models using the OpenRouter API. Supports text and image interactions.

    ## 🚀 Quick Start:
    1. Enter your OpenRouter API key
    2. Choose a model
    3. Start chatting!

    ## 🔧 Advanced:
    - Adjust parameters in the "Advanced Settings" section
    - Upload images for multimodal interactions

    Enjoy your AI-powered conversation!
    """)

    with gr.Row():
        with gr.Column(scale=1):
            api_key = gr.Textbox(label="API Key", type="password", placeholder="Enter your OpenRouter API key")
            model = gr.Dropdown(
                label="Select Model",
                choices=[
                    "google/gemini-flash-1.5",
                    "openai/gpt-4o-mini",
                    "anthropic/claude-3.5-sonnet:beta",
                    "gryphe/mythomax-l2-13b",
                    "meta-llama/llama-3.1-70b-instruct",
                    "microsoft/wizardlm-2-8x22b",
                    "nousresearch/hermes-3-llama-3.1-405b",
                    "mistralai/mistral-nemo",
                    "meta-llama/llama-3.1-8b-instruct",
                    "deepseek/deepseek-chat",
                    "mistralai/mistral-tiny",
                    "openai/gpt-4o",
                    "mistralai/mistral-7b-instruct",
                    "meta-llama/llama-3-70b-instruct",
                    "microsoft/wizardlm-2-7b"
                ],
                value="google/gemini-flash-1.5"
            )

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Group():
                    temperature = gr.Slider(minimum=0, maximum=2, value=1, step=0.1, label="Temperature")
                    max_tokens = gr.Slider(minimum=1, maximum=4096, value=1000, step=1, label="Max Tokens")
                    top_p = gr.Slider(minimum=0, maximum=1, value=1, step=0.01, label="Top P")
                    top_k = gr.Slider(minimum=0, maximum=100, value=0, step=1, label="Top K")

                with gr.Group():
                    frequency_penalty = gr.Slider(minimum=-2, maximum=2, value=0, step=0.1, label="Frequency Penalty")
                    presence_penalty = gr.Slider(minimum=-2, maximum=2, value=0, step=0.1, label="Presence Penalty")
                    repetition_penalty = gr.Slider(minimum=0, maximum=2, value=1, step=0.1, label="Repetition Penalty")

                with gr.Group():
                    stop = gr.Textbox(label="Stop Sequence")
                    min_p = gr.Slider(minimum=0, maximum=1, value=0, step=0.01, label="Min P")
                    top_a = gr.Slider(minimum=0, maximum=1, value=0, step=0.01, label="Top A")
                    seed = gr.Number(label="Seed", precision=0)

                with gr.Group():
                    logit_bias = gr.Textbox(label="Logit Bias (JSON)")
                    logprobs = gr.Checkbox(label="Log Probabilities")
                    top_logprobs = gr.Slider(minimum=0, maximum=20, value=0, step=1, label="Top Log Probabilities")
                    response_format = gr.Textbox(label="Response Format (JSON)")
                    tools = gr.Textbox(label="Tools (JSON Array)")
                    tool_choice = gr.Textbox(label="Tool Choice")

        with gr.Column(scale=2):
            chatbot = gr.ChatInterface(
                fn=bot_streaming,
                additional_inputs=[
                    api_key, model, temperature, max_tokens, top_p, top_k,
                    frequency_penalty, presence_penalty, repetition_penalty, stop,
                    min_p, top_a, seed, logit_bias, logprobs, top_logprobs,
                    response_format, tools, tool_choice
                ],
                title="💬 Chat with AI",
                description="Upload images or type your message to start the conversation.",
                retry_btn="🔄 Retry",
                undo_btn="↩️ Undo",
                clear_btn="🗑️ Clear",
                multimodal=True,
                cache_examples=False,
                fill_height=True,
            )

demo.launch(debug=True, share=True)
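
For a quick sanity check outside the Gradio UI, the same OpenRouter streaming endpoint that bot_streaming calls can be exercised directly. The following is a minimal sketch, not part of app.py: the OPENROUTER_API_KEY environment variable, the stream_completion helper name, and the sample prompt are illustrative assumptions; the default model simply mirrors the app's dropdown default.

import json
import os
import requests

def stream_completion(prompt, model="google/gemini-flash-1.5"):
    # Mirrors the SSE parsing in bot_streaming: read "data: {...}" lines until "[DONE]".
    response = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},  # assumed env var
        json={"model": model, "messages": [{"role": "user", "content": prompt}], "stream": True},
        stream=True,
    )
    for line in response.iter_lines():
        if not line:
            continue
        line = line.decode("utf-8")
        if line.startswith("data: "):
            payload = line[6:]
            if payload.strip() == "[DONE]":
                break
            try:
                delta = json.loads(payload)["choices"][0].get("delta", {})
            except (json.JSONDecodeError, KeyError, IndexError):
                continue
            print(delta.get("content", ""), end="", flush=True)

stream_completion("Say hello in one sentence.")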