# aux.py
import gradio as gr
from huggingface_hub import InferenceClient

# Use Meta's LLaMA 2 chat model via the Hugging Face Inference API.
# Replace the placeholder with a real access token before running.
client = InferenceClient("meta-llama/Llama-2-7b-chat-hf", token="YOUR_HUGGING_FACE_TOKEN")
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Start with the system prompt, then replay the prior conversation turns.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Append the latest user message.
    messages.append({"role": "user", "content": message})
    response = ""

    # Stream the completion and yield the growing response so the
    # Gradio UI updates incrementally. The loop variable is renamed to
    # `chunk` to avoid shadowing the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some streamed chunks carry no content (e.g. the final one)
            response += token
            yield response
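

# --- Gradio wiring (a hedged sketch; not present in the original file) ---
# The file imports gradio but never builds an interface, so `respond` is never
# called. A minimal ChatInterface hookup is sketched below; the default system
# message and the slider ranges/defaults are assumptions, not values taken from
# the original code.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()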