# grady/utils.py
import os
import pickle
import langchain
import faiss
from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.cache import InMemoryCache
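# Cache LLM responses in memory so identical prompts are not re-sent to the provider.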
langchain.llm_cache = InMemoryCache()
models = ["GPT-3.5", "Flan UL2", "GPT-4", "Flan T5"]
pickle_file = "_vs.pkl"
index_file = "_vs.index"
models_folder = "models/"
llm = ChatOpenAI(model_name="gpt-4", temperature=0.1)
embeddings = OpenAIEmbeddings(model='text-embedding-ada-002')
chat_history = []
memory = ConversationBufferWindowMemory(memory_key="chat_history", k=10)
vectorstore_index = None
system_template = """You are "Grady, a helpful grading assistant", a VERY LINIENT GRADER! You use the grading rubric template to grade answers given by students on the course material which you have access to in the vectorstore but you STRICTLY FOLLOW THE GRADING TEMPLATE while generating response.
You should be VERY LINIENT while giving marks and give the benefit of the doubt to the student even if there is something not stated as clearly as expected. Even if something is mentioned indirectly or remotely close to the solution, give FULL score.
Especially, when Solution is given but Reasoning is not accurate, give FULL score.
You have a strong personality and are firm in tone while replying like a tightly wound up person. You should always be loud and the center of attraction in the room.
Students will ask you a question in the following format:
[Question No.]: [Answer]
For example:
1: This is my answer.
Every answer is to be scored as mentioned in the FEEDBACK SAMPLE TEMPLATE.
Rubric:
Question 11 Prompt in the Quiz:
Based on the facts and the description of the problems in the Paediatric Orthopaedic Clinic case, provide any one solution that you would attempt to improve the situation. Please provide your reasoning for your suggested solution – i.e., explain your intuition or connect to concepts of process analytics (if you have knowledge of these concepts – it is not required that you go over module 2 to address this question).
Please note that even if your suggested solution is not the most important one or is not even one that would likely improve the performance of the process once we consider all its implications, you will get full credit for your response as long as you have provided a logical reason for your suggestion. The suggested length for your response to this question is 100 words or 5-6 sentences, and the maximum acceptable is 200 words.
Possible Student Answers:
Schedule new and returning patients systematically (suggestions may include appointments and more specifics about how to schedule)
Add a surgeon and/or a resident
And/or divide work among surgeon and residents in a systematic/different way
Add more capacity at X-ray
And/or buy a dedicated X-ray machine for the clinic instead of sharing it with the Hospital
And/or locate the machine within the clinic eliminating the need to walk
And/or have the results electronically transmitted instead of patients waiting to walk the results back
And/or Schedule the X-ray department systematically to reduce the number of changeovers needed between lower and upper extremity X-rays.
Expand the number of working hours or days of the week that the clinic operates.
Add more front desk or nurse capacity.
Reduce waiting times between tasks.
Inform patients about the wait times at each stage (through monitors such as those we generally see in clinics) so that the waiting becomes less irritating.
Keep patients (kids) entertained with TVs and games throughout the process.
Have patients fill out forms online to reduce some of the time at the start
QUESTION 11 has two parts:
1) Solution
2) Reasoning
Solution: While we outline some of the possible student answers above, any other creative solution is acceptable if the student explains why it is offered.
Reasoning: Students can support their response in two ways, and either one is acceptable:
By using words like busy and unavailable, or something similar
OR
With reasoning that comes from process analytics concepts (e.g., utilization, bottleneck, etc.).
Word count: A reasonable response would require a length of 50 to 100 words. Learners may write more or less, and may even include flowcharts or information on drawbacks of their suggested solution.
FEEDBACK SAMPLE TEMPLATE:
# QUESTION 11
| Score | QUESTION 11 Feedback Options |
|-------|------------------------------|
| 5 | **Full Points (Solution + Reasoning)**: Well-reasoned course of action! You successfully provided a logical solution that would improve the waiting time at the Paediatric Orthopaedic Clinic, such as [briefly indicate student’s solution]. You also provided a logical reasoning for it. Your answer was very insightful. (5 pts.) |
| 4 | **Partial Points (Solution is provided but reasoning is missing)**: Thank you for providing a logical solution that would improve the waiting time at the Paediatric Orthopaedic Clinic, such as [briefly indicate student’s solution]. The answer can be improved by providing reasoning regarding why you selected this solution. For example, if the suggested solution is “adding a surgeon or resident,” the reasoning would be “Because the staff appears to be too busy to deal with the number of patients the clinic receives.” Another solution would be “Adding more working hours to the clinic,” and the reasoning would be “The clinic is currently open three half-day sessions per week and, considering the number of patients they are receiving, this is not enough.” (4 pts.) |
| 2 | **Partial Points (Minor Attempt: at least one complete sentence or three or more words…)**: Thank you for your attempt; while this answer was relevant to the case, it was very brief. We were seeking a logical solution that would improve the waiting time at the Paediatric Orthopaedic Clinic, with relevant reasoning. For example, if the suggested solution is “adding a surgeon or resident,” the reasoning would be “Because the staff appears to be too busy to deal with the number of patients the clinic receives.” Another solution would be “Adding more working hours to the clinic,” and the reasoning would be “The clinic is currently open three half-day sessions per week and, considering the number of patients they are receiving, this is not enough.” (2 pts.) |
| 0 | **No Credits**: Blank – no response (0 pts.) |
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
def set_model_and_embeddings(model):
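    """Switch the active LLM to `model` and reset the chat history."""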
global chat_history
set_model(model)
# set_embeddings(model)
chat_history = []
def set_model(model):
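    """Instantiate the global LLM for the chosen model name (OpenAI chat models or Hugging Face Hub models)."""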
global llm
print("Setting model to " + str(model))
if model == "GPT-3.5":
print("Loading GPT-3.5")
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
elif model == "GPT-4":
print("Loading GPT-4")
llm = ChatOpenAI(model_name="gpt-4", temperature=0.1)
elif model == "Flan UL2":
print("Loading Flan-UL2")
llm = HuggingFaceHub(repo_id="google/flan-ul2", model_kwargs={"temperature": 0.1, "max_new_tokens":500})
elif model == "Flan T5":
print("Loading Flan T5")
llm = HuggingFaceHub(repo_id="google/flan-t5-base", model_kwargs={"temperature": 0.1})
else:
print("Loading GPT-3.5 from else")
llm = ChatOpenAI(model_name="text-davinci-002", temperature=0.1)
def set_embeddings(model):
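    """Select the embedding model matching the model family (OpenAI vs. Hugging Face). The call in set_model_and_embeddings is currently commented out."""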
global embeddings
if model == "GPT-3.5" or model == "GPT-4":
print("Loading OpenAI embeddings")
embeddings = OpenAIEmbeddings(model='text-embedding-ada-002')
elif model == "Flan UL2" or model == "Flan T5":
print("Loading Hugging Face embeddings")
embeddings = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-MiniLM-L6-v2")
def get_search_index(model):
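    """Return the FAISS search index for `model`, loading it from the pickle on disk if present, else building it."""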
global vectorstore_index
if os.path.isfile(get_file_path(model, pickle_file)) and os.path.isfile(
get_file_path(model, index_file)) and os.path.getsize(get_file_path(model, pickle_file)) > 0:
# Load index from pickle file
with open(get_file_path(model, pickle_file), "rb") as f:
search_index = pickle.load(f)
print("Loaded index")
else:
search_index = create_index(model)
print("Created index")
vectorstore_index = search_index
return search_index
def create_index(model):
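    """Build a FAISS vectorstore from the chunked source documents and persist it to disk."""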
source_chunks = create_chunk_documents()
search_index = search_index_from_docs(source_chunks)
faiss.write_index(search_index.index, get_file_path(model, index_file))
# Save index to pickle file
with open(get_file_path(model, pickle_file), "wb") as f:
pickle.dump(search_index, f)
return search_index
def get_file_path(model, file):
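    """Return the artifact path for the model family: OpenAI-based models share one index, Hugging Face models another."""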
if model == "GPT-3.5" or model == "GPT-4":
return models_folder + "openai" + file
else:
return models_folder + "hf" + file
def search_index_from_docs(source_chunks):
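    """Embed the document chunks and construct an in-memory FAISS vectorstore."""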
# print("source chunks: " + str(len(source_chunks)))
# print("embeddings: " + str(embeddings))
search_index = FAISS.from_documents(source_chunks, embeddings)
return search_index
def get_word_files():
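    """Load all Word (.docx) documents under docs/ recursively."""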
loader = DirectoryLoader('docs', glob="**/*.docx", loader_cls=UnstructuredWordDocumentLoader, recursive=True)
document_list = loader.load()
return document_list
def fetch_data_for_embeddings():
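    """Gather all PDF and Word documents from docs/ for embedding."""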
    document_list = get_pdf_files()
    document_list.extend(get_word_files())
    # TODO: use file_url_mapping to set each document's metadata to the URL recorded as its source
print("document list: " + str(len(document_list)))
return document_list
def get_pdf_files():
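    """Load all PDF documents under docs/ recursively."""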
loader = DirectoryLoader('docs', glob="**/*.pdf", loader_cls=UnstructuredPDFLoader, recursive=True)
document_list = loader.load()
return document_list
def create_chunk_documents():
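    """Split the source documents into chunks of up to 800 characters for embedding."""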
sources = fetch_data_for_embeddings()
splitter = CharacterTextSplitter(separator=" ", chunk_size=800, chunk_overlap=0)
source_chunks = splitter.split_documents(sources)
print("chunks: " + str(len(source_chunks)))
return source_chunks
def get_qa_chain(vectorstore_index):
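    """Build a ConversationalRetrievalChain that retrieves similar chunks and answers with the grading prompt."""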
    global llm
print(llm)
# embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
# compression_retriever = ContextualCompressionRetriever(base_compressor=embeddings_filter, base_retriever=gpt_3_5_index.as_retriever())
retriever = vectorstore_index.as_retriever(search_type="similarity_score_threshold",
search_kwargs={"score_threshold": .7})
chain = ConversationalRetrievalChain.from_llm(llm, retriever, return_source_documents=True,
verbose=True, get_chat_history=get_chat_history,
combine_docs_chain_kwargs={"prompt": CHAT_PROMPT})
return chain
def get_chat_history(inputs) -> str:
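    """Render (human, ai) exchange tuples as a plain-text transcript for the chain."""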
res = []
for human, ai in inputs:
res.append(f"Human:{human}\nAI:{ai}")
return "\n".join(res)
def generate_answer(question) -> str:
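    """Answer `question` with the QA chain and append the source filenames to the reply."""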
global chat_history, vectorstore_index
chain = get_qa_chain(vectorstore_index)
result = chain(
{"question": question, "chat_history": chat_history, "vectordbkwargs": {"search_distance": 0.6}})
chat_history = [(question, result["answer"])]
sources = []
print(result)
for document in result['source_documents']:
# sources.append(document.metadata['url'])
sources.append(document.metadata['source'].split('/')[-1])
print(sources)
source = ',\n'.join(set(sources))
return result['answer'] + '\nSOURCES: ' + source
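
# Minimal usage sketch (a hypothetical entry point, not part of the app):
# assumes OPENAI_API_KEY is set in the environment and a docs/ folder of
# PDF/Word files sits next to this module. get_search_index() populates the
# global vectorstore_index that generate_answer() relies on.
if __name__ == "__main__":
    set_model_and_embeddings("GPT-4")
    get_search_index("GPT-4")
    print(generate_answer("11: Add a dedicated X-ray machine because the shared machine is a bottleneck."))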