Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -24,8 +24,8 @@ def extract_text_from_pdf(pdf_file):
|
|
24 |
def answer_question(question, context):
|
25 |
inputs = tokenizer.encode_plus(question, context, return_tensors="pt")
|
26 |
answer_start_scores, answer_end_scores = model(**inputs)
|
27 |
-
answer_start = torch.argmax(answer_start_scores.
|
28 |
-
answer_end = torch.argmax(answer_end_scores.
|
29 |
answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs['input_ids'][0][answer_start:answer_end]))
|
30 |
return answer
|
31 |
|
@@ -49,7 +49,7 @@ if uploaded_file:
|
|
49 |
|
50 |
qa_chain = ConversationalRetrievalChain(
|
51 |
retriever=retriever,
|
52 |
-
llm=None, #
|
53 |
memory=memory
|
54 |
)
|
55 |
|
|
|
24 |
def answer_question(question, context):
    """Answer `question` extractively from `context`.

    Uses the module-level `tokenizer` and `model` (a HF question-answering
    model). Returns the highest-scoring answer span decoded to a string.
    """
    inputs = tokenizer.encode_plus(question, context, return_tensors="pt")
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)
    # BUG FIX: the original tuple-unpacked model(**inputs) into two names and
    # then read `.start_logits` on the first one. The model returns a single
    # output object; both logits tensors live on it.
    answer_start = torch.argmax(outputs.start_logits)
    # +1 because the Python slice end below is exclusive.
    answer_end = torch.argmax(outputs.end_logits) + 1
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(
            inputs['input_ids'][0][answer_start:answer_end]
        )
    )
    return answer
|
31 |
|
|
|
49 |
|
50 |
# Build the conversational retrieval (RAG) chain from the retriever and
# memory created earlier in this branch.
# NOTE(review): llm=None — the comment below says HuggingFaceHub was removed
# as unused, but ConversationalRetrievalChain normally needs an LLM (usually
# built via ConversationalRetrievalChain.from_llm); confirm this direct
# construction with llm=None actually works at runtime.
qa_chain = ConversationalRetrievalChain(
    retriever=retriever,
    llm=None,  # removed HuggingFaceHub because it is not used
    memory=memory
)
|
55 |
|