import os
from openai import OpenAI

# Read the OpenAI API key from the environment (set OPENAI_API_KEY before running, or pass the key to the client directly)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

if OPENAI_API_KEY is None:
    raise ValueError("Hugging Face API token is missing. Set it as an environment variable: OPENAI_API_KEY")


# Initialize the OpenAI client using the API key
client = OpenAI(api_key=OPENAI_API_KEY)

EMBEDDING_MODEL = "text-embedding-3-large"  # alternatively "text-embedding-3-small"

def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
    """Get text embeddings from OpenAI."""
    result = client.embeddings.create(
        model=model,
        input=text,
    )
    return result.data[0].embedding

COMPLETIONS_MODEL = "gpt-4o-mini"  # "gpt-3.5-turbo-instruct" was used in earlier versions

def get_response(messages: list[dict], model: str = COMPLETIONS_MODEL,
                 temperature: float = 0, max_completion_tokens: int = 800) -> str:
    """Chat completion using OpenAI's GPT models.
    https://platform.openai.com/docs/api-reference/chat/create """
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_completion_tokens=max_completion_tokens,
        temperature=temperature,
        # stream=True
    )
    return response.choices[0].message.content
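

if __name__ == "__main__":
    # Minimal usage sketch, assuming a valid OPENAI_API_KEY is set in the environment;
    # the sample text and prompt below are illustrative only.
    sample_text = "Text embeddings map sentences to dense vectors for similarity search."
    embedding = get_embedding(sample_text)
    print(f"Embedding length: {len(embedding)}")  # e.g. 3072 dimensions for text-embedding-3-large

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain in one sentence what a text embedding is."},
    ]
    print(get_response(messages))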