# NOTE: The lines below were Hugging Face Spaces page residue from the scrape
# ("Spaces: Sleeping Sleeping") — kept here as a comment so the file parses.
import gradio as gr
from llama_index.llms import Perplexity
from llama_index.llms.base import ChatMessage
# Handle a single chat interaction with a Perplexity-hosted model.
def chat_with_pplx_model(api_key, model_name, user_input, pre_prompt, system_message, temperature, max_tokens):
    """Send one chat turn to a Perplexity LLM and return its reply as text.

    Parameters
    ----------
    api_key : str
        Perplexity API key used to authenticate the request.
    model_name : str
        Perplexity model to query (e.g. "pplx-70b-online").
    user_input : str
        The user's message for this turn.
    pre_prompt : str
        Text prepended to the user input to set context or instructions.
    system_message : str
        System-role message sent ahead of the user message.
    temperature : float
        Sampling temperature forwarded to the model.
    max_tokens : str
        Optional response-length cap typed into a free-text box; empty or
        non-numeric input means "use the model's default".

    Returns
    -------
    str
        The model's chat response rendered as text.
    """
    # The Gradio Textbox delivers max_tokens as a free-form string; a
    # non-numeric value would crash int(), so fall back to the model default.
    try:
        max_tokens = int(max_tokens) if max_tokens else None
    except ValueError:
        max_tokens = None
    # Prepend the pre-prompt so a fixed context precedes every user message.
    full_user_input = f"{pre_prompt}\n{user_input}"
    # NOTE(review): llama_index's Perplexity constructor documents the model
    # selector as `model`; confirm `model_name` is accepted by the installed
    # version.
    llm = Perplexity(
        api_key=api_key,
        model_name=model_name,
        temperature=temperature,
        max_tokens=max_tokens,  # None lets the model use its default limit.
    )
    # Build the two-message conversation: system context first, then the user turn.
    messages = [
        ChatMessage(role="system", content=system_message),
        ChatMessage(role="user", content=full_user_input),
    ]
    # Convert the ChatResponse object to text so the Gradio "text" output
    # renders the reply instead of an object repr.
    return str(llm.chat(messages))
# Gradio input components, in the order chat_with_pplx_model expects them.
api_key_input = gr.Textbox(label="API Key")  # Perplexity API key.
model_name_dropdown = gr.Dropdown(
    choices=["pplx-70b-online", "pplx-7b-online", "mixtral-8x7b-instruct"],
    label="LLM Model Name",
)  # Which Perplexity model to query.
user_input = gr.Textbox(placeholder="Enter your input here", label="User Input")  # The user's message.
pre_prompt_input = gr.Textbox(placeholder="Enter pre-prompt here", label="Pre-Prompt")  # Context prepended to the message.
system_message = gr.Textbox(placeholder="Enter system message here", label="System Message")  # System-role message.
temperature_slider = gr.Slider(minimum=0, maximum=2, step=0.01, label="Temperature")  # Sampling temperature.
max_tokens_input = gr.Textbox(placeholder="Enter max tokens (optional)", label="Max Tokens")  # Optional length cap.
# Wire the components into a single-function Gradio interface.
iface = gr.Interface(
    fn=chat_with_pplx_model,
    inputs=[
        api_key_input,
        model_name_dropdown,
        user_input,
        pre_prompt_input,
        system_message,
        temperature_slider,
        max_tokens_input,
    ],
    outputs="text",  # chat_with_pplx_model returns plain text.
)

# Launch only when run as a script, so importing this module has no
# side effects; script invocation behaves exactly as before.
if __name__ == "__main__":
    iface.launch(share=True)  # share=True exposes a temporary public URL.