import os


SYSTEM_PROMPT = "You are a programming assistant. You are solving the 2024 advent of code challenge."
PROMPT_TEMPLATE = """You are solving the 2024 advent of code challenge.
You will be provided the description of each challenge. You are to provide the solution to each given challenge.
1) You can reason and explain your logic before writing the code.
2) You must write the code so that it can be parsed into an actual Python file.
3) It will be parsed by the evaluator, so it must be valid Python code.
4) All of the code must be in a single code block, delimited by ```python and ```.
5) To count as a proper submission, the code must print the result to each question asked.
6) Each question will have a single string as an answer. Make sure to print that string, and nothing else.
7) The actual input to the question will be provided in a file relative to the Python file, e.g. "./input.txt". You must read and parse the file accordingly. You can safely assume the file will always be relative to the Python file.

Here is an example of a proper submission:

Your reasoning goes here ...

```python


file = "input.txt"

def your_function(...):
    ...

...
print(result1)


def your_other_function(...):
    ...

...
print(result2)

```

Here is today's challenge description:
{problem_description}
"""


def build_prompt(
    problem_description: str, prompt_template: str = PROMPT_TEMPLATE
) -> str:
    return prompt_template.format(problem_description=problem_description)


def get_completion(
    provider: str,
    user_prompt: str,
    system_prompt: str,
    model: str,
    temperature: float,
) -> str:
    """
    Unified function to get completions from various LLM providers.
    """
    if provider == "openai":
        from openai import OpenAI
        OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
        assert (
            OPENAI_API_KEY
        ), "OPENAI_API_KEY is not set, please set it in your environment variables."
        openai_client = OpenAI(api_key=OPENAI_API_KEY)
        completion = openai_client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=temperature,
        )
        # logger.info("Completion: %s", completion)
        return completion.choices[0].message.content

    elif provider == "gemini":

        # Setup
        import google.generativeai as genai
        AI_STUDIO_API_KEY = os.getenv("AI_STUDIO_API_KEY")
        assert (
            AI_STUDIO_API_KEY
        ), "AI_STUDIO_API_KEY is not set, please set it in your environment variables."
        genai.configure(api_key=AI_STUDIO_API_KEY)

        # Avoid shadowing the `model` argument with the client object
        gemini_model = genai.GenerativeModel(
            model_name=model,
            system_instruction=system_prompt,
        )
        response = gemini_model.generate_content(
            user_prompt,
            generation_config=genai.types.GenerationConfig(temperature=temperature),
        )

        # logger.info("reponse: %s", response)
        return response.text

    elif provider == "anthropic":

        # Setup
        import anthropic
        ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
        assert (
            ANTHROPIC_API_KEY
        ), "ANTHROPIC_API_KEY is not set, please set it in your environment variables."
        anthropic_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)

        response = anthropic_client.messages.create(
            model=model,
            max_tokens=2048,
            temperature=temperature,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )
        # logger.info("Response: %s", response)
        return response.content[0].text

    else:
        raise ValueError(f"Unknown provider: {provider}")