Spaces:
Sleeping
Sleeping
Updated LLM generation parameters
Browse files
app.py
CHANGED
@@ -43,9 +43,9 @@ def generate_llm_response(text, model_id="ccibeekeoc42/Llama3.1-8b-base-SFT-2024
|
|
43 |
{"role": "system", "content": "You are a BRIEF AND DIRECT assistant. A part of a speech pipeline so keep your responses short, fluent, and straight to the point. Avoid markdown in responses."},
|
44 |
{"role": "user", "content": text}
|
45 |
],
|
46 | -    top_p=
47 | -    temperature=
48 | -    max_tokens=
|
49 |
stream=True,
|
50 |
seed=None,
|
51 |
stop=None,
|
|
|
43 |
{"role": "system", "content": "You are a BRIEF AND DIRECT assistant. A part of a speech pipeline so keep your responses short, fluent, and straight to the point. Avoid markdown in responses."},
|
44 |
{"role": "user", "content": text}
|
45 |
],
|
46 | +    top_p=0.3,
47 | +    temperature=1,
48 | +    max_tokens=750,
|
49 |
stream=True,
|
50 |
seed=None,
|
51 |
stop=None,
|