from typing import List

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field


# Schema for a single question-answer pair.
class QA(BaseModel):
    question: str = Field(description="question")
    answer: str = Field(description="answer")


# Schema for the full list of generated question-answer pairs.
class AutoQA(BaseModel):
    questions: List[QA] = Field(description="list of question and answers")
qa_prompt_template = """
Come up with 10 questions and answers that could commonly be asked about the following research paper.
The questions and answers should capture the whole essence of the research paper.
The answers should be a bit detailed and strictly based on the research paper.
Your response should be recorded in the following JSON format: {format_instructions}.
Here is the research paper: ####{paper}####
"""
auto_qa_output_parser = JsonOutputParser(pydantic_object=AutoQA)

qa_prompt = PromptTemplate(
    template=qa_prompt_template,
    input_variables=["paper"],
    partial_variables={
        "format_instructions": auto_qa_output_parser.get_format_instructions()
    },
)
# Given a chat model, build the prompt -> model chain for the initial QA generation.
auto_qa_chain = lambda model: qa_prompt | model
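# Hedged usage sketch (assumed names, not from the original source): "model" stands in
# for any LangChain-compatible chat model and "paper_text" for the paper's full text.
# Piping the chain into the parser returns a plain dict shaped like AutoQA, e.g.
#   (auto_qa_chain(model) | auto_qa_output_parser).invoke({"paper": paper_text})
#   -> {"questions": [{"question": "...", "answer": "..."}, ...]}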
followup_prompt_template = """
Question: {question}
Answer: {answer}
Based on the above question and answer and the research paper as your context, come up with a follow-up question and its answer.
The answer should be a bit detailed and strictly based on the research paper.
Your response should be recorded in the following JSON format: {format_instructions}.
Here is the research paper: ####{paper}####
"""
followup_prompt = PromptTemplate(
    template=followup_prompt_template,
    input_variables=["question", "answer", "paper"],
    partial_variables={
        "format_instructions": auto_qa_output_parser.get_format_instructions()
    },
)
# Given a chat model, build the prompt -> model chain for follow-up QA generation.
followup_qa_chain = lambda model: followup_prompt | model
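# Hedged usage sketch (assumptions, not from the original source): "ChatOpenAI" is used
# only as an example chat model and "paper_text" stands in for the paper's full text;
# any LangChain-compatible chat model could be substituted.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    model = ChatOpenAI(model="gpt-4o-mini")  # assumed example model
    paper_text = "..."  # replace with the research paper's text

    # First pass: generate the 10 seed question-answer pairs.
    qa_pairs = (auto_qa_chain(model) | auto_qa_output_parser).invoke({"paper": paper_text})

    # Second pass: ask a follow-up for the first seed pair.
    seed = qa_pairs["questions"][0]
    followup = (followup_qa_chain(model) | auto_qa_output_parser).invoke(
        {"question": seed["question"], "answer": seed["answer"], "paper": paper_text}
    )
    print(followup)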