Update app.py
app.py CHANGED
@@ -25,16 +25,18 @@ custom_css = """
 # Verwende die integrierten Embeddings von ChromaDB
 embedding_function = embedding_functions.DefaultEmbeddingFunction()
 #client = Client("Qwen/Qwen2.5-72B-Instruct")
-client = groq.Client(api_key=api_key)
 
-
+
+def update(message):
+    client = groq.Client(api_key=api_key)
+
     try:
         # Use Llama 3 70B powered by Groq for text generation
         completion = client.chat.completions.create(
             model="llama3-70b-8192",
             messages=[
                 {"role": "system", "content": "You are a helpful assistant."},
-                {"role": "user", "content":
+                {"role": "user", "content": f"{message} antworte immer auf deutsch"}
             ],
         )
         return completion.choices[0].message.content
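For context, here is a minimal, self-contained sketch of how update() reads after this commit. It assumes the Groq API key is taken from a GROQ_API_KEY environment variable and that errors are simply returned as text; the except branch and the surrounding Gradio wiring are outside this hunk, so those parts are assumptions, not the Space's actual code.

import os

import groq


# Assumption: the hunk only shows `api_key` being used, not where it comes from.
api_key = os.environ.get("GROQ_API_KEY")


def update(message):
    # The commit moves client creation inside the handler.
    client = groq.Client(api_key=api_key)

    try:
        # Use Llama 3 70B powered by Groq for text generation
        completion = client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                # The new user prompt appends "antworte immer auf deutsch"
                # ("always answer in German") to the incoming message.
                {"role": "user", "content": f"{message} antworte immer auf deutsch"},
            ],
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Assumption: the except branch is not shown in the hunk; returning the
        # error text is a placeholder for whatever app.py actually does.
        return f"Error: {e}"

In the Space this handler is presumably wired to a Gradio component (the hunk header shows custom_css being defined nearby), but that wiring is not part of this diff.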