Update retrieval_parent.py
retrieval_parent.py  CHANGED  +2 -2
@@ -18,7 +18,7 @@ from langchain_core.documents import Document
 
 # --- Configuration ---
 GENERATION_MODEL = "llama-3.1-8b-instant"
-RERANKER_MODEL = '
+RERANKER_MODEL = 'BAAI/bge-reranker-v2-m3'
 INITIAL_K_CANDIDATES = 20
 TOP_K_CHUNKS = 10
 
@@ -43,7 +43,7 @@ async def generate_hypothetical_document(query: str, groq_api_key: str) -> str:
     chat_completion = await client.chat.completions.create(
         messages=[{"role": "user", "content": prompt}],
         model=GENERATION_MODEL,
-        temperature=0.
+        temperature=0.3,
         max_tokens=500,
     )
     end_time = time.perf_counter()  # <-- END TIMER
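The hunks only touch configuration and the generation call; the reranking step that consumes RERANKER_MODEL, INITIAL_K_CANDIDATES, and TOP_K_CHUNKS is outside this diff. As a minimal sketch of how those constants typically fit together, assuming the script loads BAAI/bge-reranker-v2-m3 through sentence-transformers' CrossEncoder (the actual retrieval_parent.py code may differ):

# Sketch only -- assumes a sentence-transformers CrossEncoder reranker;
# the function name `rerank` and its signature are illustrative, not from the diff.
from sentence_transformers import CrossEncoder

RERANKER_MODEL = 'BAAI/bge-reranker-v2-m3'
INITIAL_K_CANDIDATES = 20
TOP_K_CHUNKS = 10

reranker = CrossEncoder(RERANKER_MODEL)

def rerank(query: str, candidates: list[str]) -> list[str]:
    # Score up to INITIAL_K_CANDIDATES retrieved chunks against the query,
    # then keep only the TOP_K_CHUNKS highest-scoring ones.
    pairs = [(query, chunk) for chunk in candidates[:INITIAL_K_CANDIDATES]]
    scores = reranker.predict(pairs)
    ranked = sorted(zip(candidates, scores), key=lambda x: x[1], reverse=True)
    return [chunk for chunk, _ in ranked[:TOP_K_CHUNKS]]

The second change pins temperature=0.3 in generate_hypothetical_document, which presumably keeps the hypothetical-document generation fairly focused rather than highly varied.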