import os

from openai import OpenAI

# Ensure the API key is set in your environment variables, or pass it to the client directly.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if OPENAI_API_KEY is None:
    raise ValueError("OpenAI API key is missing. Set it as an environment variable: OPENAI_API_KEY")

# Initialize the OpenAI client using the API key
client = OpenAI(api_key=OPENAI_API_KEY)
EMBEDDING_MODEL = "text-embedding-3-large"  # or "text-embedding-3-small"

def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
    """Get text embeddings from OpenAI."""
    result = client.embeddings.create(
        model=model,
        input=text,
    )
    return result.data[0].embedding
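
# Example usage (an illustrative sketch, not part of the original script;
# the sample text and variable name are assumptions):
# vec = get_embedding("Hello, world!")
# print(len(vec))  # text-embedding-3-large returns 3072-dimensional vectors
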
COMPLETIONS_MODEL = "gpt-4o-mini"  # "gpt-3.5-turbo-instruct" was used in an earlier version

def get_response(messages: list[dict], model: str = COMPLETIONS_MODEL,
                 temperature=0, max_completion_tokens=800) -> str:
    """Chat completion using OpenAI's GPT models.

    https://platform.openai.com/docs/api-reference/chat/create
    """
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_completion_tokens=max_completion_tokens,
        temperature=temperature,
        # stream=True
    )
    return response.choices[0].message.content
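
# Minimal usage sketch (an assumption, not from the original): build a chat-style
# messages list and call get_response. The system and user prompts below are
# illustrative placeholders.
if __name__ == "__main__":
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain in one sentence what a text embedding is."},
    ]
    print(get_response(messages))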