import os
import gradio as gr
from openai import OpenAI
from typing import List, Tuple

# Define available models
AVAILABLE_MODELS = {
    "Sonar Pro": "sonar-pro",
    "Sonar": "sonar",
}

PX_ENDPOINT_URL = "https://api.perplexity.ai"
PX_API_KEY = os.getenv("PX_KEY")
PASSWORD = os.getenv("PASSWD")  # Store the password in an environment variable

px_client = OpenAI(base_url=PX_ENDPOINT_URL, api_key=PX_API_KEY)
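
# Note: Perplexity's chat API is OpenAI-compatible, so the stock OpenAI SDK
# client works against it unchanged; only base_url and api_key differ.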


def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    model_choice: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Handles chatbot responses with Perplexity AI."""
    if model_choice not in AVAILABLE_MODELS:
        # This function is a generator, so errors must be yielded, not
        # returned, or Gradio would silently display nothing.
        yield "Error: Invalid model selection."
        return
    # Rebuild the running conversation: system prompt first, then the
    # alternating user/assistant turns, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
response = ""
citations = []
try:
stream = px_client.chat.completions.create(
model=AVAILABLE_MODELS[model_choice],
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stream=True,
)
for chunk in stream:
if hasattr(chunk, "choices") and chunk.choices:
token = chunk.choices[0].delta.content or ""
response += token
yield response # Stream response as it arrives
if hasattr(chunk, "citations") and chunk.citations:
citations = chunk.citations
# Append citations as clickable links
if citations:
citation_text = "\n\nSources:\n" + "\n".join(
[f"[{i+1}] [{url}]({url})" for i, url in enumerate(citations)]
)
response += citation_text
yield response
except Exception as e:
yield f"Error: {str(e)}"
def check_password(input_password):
    """Validates the password before showing the chat interface."""
    if input_password == PASSWORD:
        # Hide the password prompt and reveal the chat interface
        return gr.update(visible=False), gr.update(visible=True)
    else:
        # Clear the field and keep the chat interface hidden
        return gr.update(value="", interactive=True), gr.update(visible=False)
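
# gr.update lets one callback modify several components at once; the returned
# tuple maps, in order, onto the outputs list wired up in submit_button.click.
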
with gr.Blocks() as demo:
    with gr.Column():
        password_input = gr.Textbox(
            type="password", label="Enter Password", interactive=True
        )
        submit_button = gr.Button("Submit")
        error_message = gr.Textbox(
            label="Error", visible=False, interactive=False
        )
    with gr.Column(visible=False) as chat_interface:
        system_prompt = gr.Textbox(
            value="You are a helpful assistant.", label="System message"
        )
        model_choice = gr.Dropdown(
            choices=list(AVAILABLE_MODELS.keys()),
            value=list(AVAILABLE_MODELS.keys())[0],
            label="Select Model",
        )
        max_tokens = gr.Slider(
            minimum=1, maximum=30000, value=2048, step=100, label="Max new tokens"
        )
        temperature = gr.Slider(
            minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
        )
        top_p = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.95, step=0.05,
            label="Top-p (nucleus sampling)"
        )

        chat = gr.ChatInterface(
            respond,
            api_name=False,
            chatbot=gr.Chatbot(height=400),  # Set the desired chat height here
            additional_inputs=[system_prompt, model_choice, max_tokens, temperature, top_p],
        )
    submit_button.click(
        check_password, inputs=password_input, outputs=[password_input, chat_interface]
    )


if __name__ == "__main__":
    demo.launch()
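    # Alternative (untested here): Gradio ships a built-in login prompt via
    # demo.launch(auth=("user", "pass")); the manual gate above instead keeps
    # the password check inside the page itself.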