{ "cells": [ { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "# !pip install nest_asyncio \\\n", "# langchain_openai langchain_huggingface langchain_core langchain langchain_community langchain-text-splitters \\\n", "# python-pptx==1.0.2 nltk==3.9.1 pymupdf lxml \\\n", "# sentence-transformers IProgress \\\n", "# huggingface_hub ipywidgets \\\n", "# qdrant-client langchain_experimental\n", "\n", "# !pip install sentence_transformers datasets pyarrow\n", "# !pip install torch\n", "# !pip install accelerate>=0.26.0\n", "# !pip install transformers\n", "# !pip install wandb\n", "# !pip install ragas\n", "\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "\n", "import nest_asyncio\n", "\n", "nest_asyncio.apply()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import os\n", "import getpass\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter Your OpenAI API Key: \")\n", "os.environ[\"RAGAS_APP_TOKEN\"] = getpass.getpass(\"Please enter your Ragas API key!\")" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "hf_username = getpass.getpass(\"Enter Your Hugging Face Username: \")\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "7171fa2fd73446349406e23d4f6b898f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(HTML(value='
Display W&B run</a>" ], "text/plain": [ "" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#!pip install wandb\n", "\n", "import wandb\n", "wandb.init(mode=\"disabled\")" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [], "source": [ "# Commented out so the whole notebook can be rerun without retraining\n", "# warmup_steps = int(len(loader) * EPOCHS * 0.1)\n", "\n", "# model.fit(\n", "# train_objectives=[(loader, train_loss)],\n", "# epochs=EPOCHS,\n", "# warmup_steps=warmup_steps,\n", "# output_path='models/midterm-compare-arctic-embed-m-ft',\n", "# show_progress_bar=True,\n", "# evaluator=evaluator,\n", "# evaluation_steps=50\n", "# )" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [], "source": [ "# Commented out so rerunning the notebook does not push the model to the Hub\n", "# model.push_to_hub(f\"{hf_username}/midterm-compare-arctic-embed-m-ft\")" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of BertModel were not initialized from the model checkpoint at drewgenai/midterm-compare-arctic-embed-m-ft and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "finetune_embeddings = HuggingFaceEmbeddings(model_name=f\"{hf_username}/midterm-compare-arctic-embed-m-ft\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Testing the fine-tuned embeddings" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of BertModel were not initialized from the model checkpoint at drewgenai/midterm-compare-arctic-embed-m-ft and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "from langchain_community.vectorstores import Qdrant\n", "from langchain_huggingface import HuggingFaceEmbeddings\n", "\n", "# Load the embedding model (fine-tuned by default; alternatives left commented)\n", "#model_id = \"Snowflake/snowflake-arctic-embed-m\"\n", "model_id = f\"{hf_username}/midterm-compare-arctic-embed-m-ft\"\n", "embedding_model = HuggingFaceEmbeddings(model_name=model_id)\n", "# model_id = \"Snowflake/snowflake-arctic-embed-m\"\n", "# embedding_model = HuggingFaceEmbeddings(model_name=model_id)\n", "# model_id = \"Snowflake/snowflake-arctic-embed-m-v2.0\"\n", "# embedding_model = HuggingFaceEmbeddings(model_name=model_id, model_kwargs={\"trust_remote_code\": True})\n", "\n", "# Load documents into Qdrant\n", "qdrant_vectorstore = Qdrant.from_documents(\n", " documents_with_metadata,\n", " embedding_model,\n", " location=\":memory:\", # In-memory for testing\n", " collection_name=\"document_comparison\",\n", ")\n", "\n", "# Create a retriever\n", "qdrant_retriever = qdrant_vectorstore.as_retriever()" ] }, 
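{ "cell_type": "markdown", "metadata": {}, "source": [ "Quick sanity check (added sketch, not part of the original run): pull a few chunks straight from the retriever before wiring it into a chain, to confirm the fine-tuned embeddings surface protocol elements. The query text is an illustrative assumption." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Added sketch: eyeball what the retriever returns for a sample query.\n", "# The query string is an assumption for illustration only.\n", "sample_query = \"Which data elements measure stress?\"\n", "for doc in qdrant_retriever.invoke(sample_query):\n", "    print(doc.metadata.get(\"source\", \"?\"), \"->\", doc.page_content[:80])" ] }, 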
{ "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "from langchain_core.prompts import ChatPromptTemplate\n", "RAG_PROMPT = \"\"\"\n", "CONTEXT:\n", "{context}\n", "\n", "QUERY:\n", "{question}\n", "\n", "You are a helpful assistant. Use the available context to answer the question.\n", "\n", "Return the response in **valid JSON format** with the following structure:\n", "\n", "[\n", " {{\n", " \"Derived Description\": \"A short name for the matched concept\",\n", " \"Protocol_1_Name\": \"Protocol 1 - Matching Element\",\n", " \"Protocol_2_Name\": \"Protocol 2 - Matching Element\"\n", " }},\n", " ...\n", "]\n", "\n", "### Rules:\n", "1. Only output **valid JSON** with no explanations, summaries, or markdown formatting.\n", "2. Ensure each entry in the JSON list represents a single matched data element from the two protocols.\n", "3. If no matching element is found in a protocol, do not drop the element; handle it as described in rule 6.\n", "4. **Do NOT include headers, explanations, or additional formatting**; only return the raw JSON list.\n", "5. Include all the elements from both protocols.\n", "6. If an element cannot be matched, still create its entry: include the element from the protocol where it was found and put \"could not match\" in the other protocol's field.\n", "\"\"\"\n", "\n", "rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)\n", "\n", "from langchain_openai import ChatOpenAI\n", "\n", "#openai_chat_model = ChatOpenAI(model=\"gpt-4o\")\n", "openai_chat_model = ChatOpenAI(model=\"gpt-4o-mini\")\n", "\n", "from operator import itemgetter\n", "from langchain.schema.output_parser import StrOutputParser\n", "\n", "rag_chain = (\n", " {\"context\": itemgetter(\"question\") | qdrant_retriever, \"question\": itemgetter(\"question\")}\n", " | rag_prompt | openai_chat_model | StrOutputParser()\n", ")" ] }, 
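{ "cell_type": "markdown", "metadata": {}, "source": [ "Smoke test (added sketch): invoke the chain once with a toy question and print the raw string, which should be a bare JSON list per the prompt's rules. The question text is illustrative, not from the original run." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Added sketch: one-off smoke test of the chain before the full comparison query.\n", "# The toy question below is an assumption for illustration.\n", "smoke_response = rag_chain.invoke({\"question\": \"Name one data element that appears in both protocols.\"})\n", "print(smoke_response[:300])  # expect the start of a raw JSON list" ] }, 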
{ "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [], "source": [ "question_text = \"\"\"Between these two files containing protocols, find the data elements in each that most likely match an element in the other, and output a CSV with three columns:\n", "\n", "1. A derived description, drawn from the two documents, of the index/measure/scale.\n", "2. A column for each protocol.\n", "3. In each protocol's column, the data element used to capture that description.\n", "\n", "The questions within elements are similar across the two documents and can be used to match the elements.\n", "There should be only one row for each scale/index/etc.\n", "The description should not be one of the questions but a name that best describes the similar data elements.\"\"\"\n", "\n", "response_text = rag_chain.invoke({\"question\": question_text})" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✅ CSV file saved: matching_data_elements.csv\n" ] } ], "source": [ "import json\n", "import pandas as pd\n", "\n", "def parse_rag_output(response_text):\n", " \"\"\"Extract structured JSON data from the RAG response.\"\"\"\n", " try:\n", " structured_data = json.loads(response_text)\n", "\n", " # Ensure a similarity score key is always present (not written to the CSV below)\n", " for item in structured_data:\n", " item.setdefault(\"Similarity Score\", \"N/A\") # Default if missing\n", "\n", " return structured_data\n", " except json.JSONDecodeError:\n", " print(\"Error: Response is not valid JSON.\")\n", " return None\n", "\n", "def save_to_csv(data, directory=\"./output\", filename=\"matching_data_elements.csv\"):\n", " \"\"\"Save structured data to CSV.\"\"\"\n", " if not data:\n", " print(\"No data to save.\")\n", " return\n", "\n", " os.makedirs(directory, exist_ok=True) # Make sure the output directory exists\n", " file_path = os.path.join(directory, filename)\n", " df = pd.DataFrame(data, columns=[\"Derived Description\", \"Protocol_1_Name\", \"Protocol_2_Name\"]) # Keep only the three report columns\n", " df.to_csv(file_path, index=False)\n", " print(f\"✅ CSV file saved: {file_path}\")\n", "\n", "# Run the pipeline\n", "structured_output = parse_rag_output(response_text)\n", "save_to_csv(structured_output)\n" ] }, 
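{ "cell_type": "markdown", "metadata": {}, "source": [ "A tolerant parsing variant (added sketch): models sometimes wrap JSON in markdown code fences despite the prompt's rules, which makes `json.loads` fail. The drop-in alternative below strips any fences first; the function name and regex are assumptions, not part of the original pipeline." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Added sketch: a fence-tolerant variant of parse_rag_output.\n", "import json\n", "import re\n", "\n", "def parse_rag_output_tolerant(response_text):\n", "    \"\"\"Like parse_rag_output, but strips ```json fences before parsing.\"\"\"\n", "    cleaned = re.sub(r\"^```(?:json)?\\\\s*|\\\\s*```$\", \"\", response_text.strip())\n", "    try:\n", "        return json.loads(cleaned)\n", "    except json.JSONDecodeError:\n", "        print(\"Error: Response is not valid JSON.\")\n", "        return None" ] }, 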
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [], "source": [ "# rag_chain.invoke({\"question\" : \"Based on the types of questions asked under each heading, can you identify the headings in one document that most closely match the second document? List them, e.g. paincoping/doc1 painstrategy/doc2\"})" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [], "source": [ "# rag_chain.invoke({\"question\" : \"Based on the types of questions asked under each heading, can you identify the headings in one document that most closely match the second document? List them, e.g. paincoping/doc1 painstrategy/doc2. These are example headings, not the ones in the actual documents. Just list the matches, not the rationale. Can you list multiple matches?\"})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [], "source": [ "### RAGAS evaluation below\n", "#docs = documents_with_metadata\n", "docs = text_loader.load()" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [], "source": [ "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "RAG_PROMPT = \"\"\"\\\n", "Given a provided context and a question, you must answer the question. If you do not know the answer, you must state that you do not know.\n", "\n", "Context:\n", "{context}\n", "\n", "Question:\n", "{question}\n", "\n", "Answer:\n", "\"\"\"\n", "\n", "rag_prompt_template = ChatPromptTemplate.from_template(RAG_PROMPT)" ] }, { "cell_type": "code", "execution_count": 40, "metadata": {}, "outputs": [], "source": [ "rag_llm = ChatOpenAI(\n", " model=\"gpt-4o-mini\",\n", " temperature=0\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of BertModel were not initialized from the model checkpoint at drewgenai/midterm-compare-arctic-embed-m-ft and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "from langchain_openai import OpenAIEmbeddings\n", "\n", "base_model_id = \"Snowflake/snowflake-arctic-embed-m\"\n", "base_embedding_model = HuggingFaceEmbeddings(model_name=base_model_id)\n", "\n", "finetune_model_id = f\"{hf_username}/midterm-compare-arctic-embed-m-ft\"\n", "finetune_embedding_model = HuggingFaceEmbeddings(model_name=finetune_model_id)\n", "\n", "openai_model_id = \"text-embedding-3-small\"\n", "openai_embedding_model = OpenAIEmbeddings(model=openai_model_id)\n" ] }, { "cell_type": "code", "execution_count": 43, "metadata": {}, "outputs": [], "source": [ "### Build a Qdrant store and retriever for each embedding model\n", "# Each :memory: location is a separate in-memory instance, so the\n", "# shared collection name does not cause a clash.\n", "\n", "qdrant_vectorstore_base = Qdrant.from_documents(\n", " docs,\n", " base_embedding_model,\n", " location=\":memory:\", # In-memory for testing\n", " collection_name=\"document_comparison\",\n", ")\n", "\n", "base_retriever = qdrant_vectorstore_base.as_retriever(search_kwargs={\"k\": 6})\n", "\n", "qdrant_vectorstore_finetune = Qdrant.from_documents(\n", " docs,\n", " finetune_embedding_model,\n", " location=\":memory:\", # In-memory for testing\n", " collection_name=\"document_comparison\",\n", ")\n", "\n", "finetune_retriever = qdrant_vectorstore_finetune.as_retriever(search_kwargs={\"k\": 6})\n", "\n", "qdrant_vectorstore_openai = Qdrant.from_documents(\n", " docs,\n", " openai_embedding_model,\n", " location=\":memory:\", # In-memory for testing\n", " collection_name=\"document_comparison\",\n", ")\n", "\n", "openai_retriever = qdrant_vectorstore_openai.as_retriever(search_kwargs={\"k\": 6})\n" ] }, { "cell_type": "code", "execution_count": 44, "metadata": {}, "outputs": [], "source": [ "# Earlier FAISS-based retriever setup, kept for reference:\n", "# base_vectorstore = FAISS.from_documents(training_documents, base_embedding_model)\n", "# base_retriever = base_vectorstore.as_retriever(search_kwargs={\"k\": 6})" ] }, 
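{ "cell_type": "markdown", "metadata": {}, "source": [ "Retriever spot check (added sketch, not part of the original run): run one query through all three retrievers and compare the top hits before building the evaluation chains. The query text is an illustrative assumption." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Added sketch: compare the three retrievers on a single query.\n", "# The query string is an assumption for illustration only.\n", "spot_query = \"Which scale measures work-related stress?\"\n", "for name, retriever in [\n", "    (\"base\", base_retriever),\n", "    (\"fine-tuned\", finetune_retriever),\n", "    (\"openai\", openai_retriever),\n", "]:\n", "    hits = retriever.invoke(spot_query)\n", "    print(name, [h.page_content[:60] for h in hits[:2]])" ] }, { "cell_type": "code", "execution_count": 45, "metadata": {}, "outputs": 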
[], "source": [ "from langchain.schema.runnable import RunnablePassthrough\n", "\n", "base_rag_chain = (\n", " {\"context\": itemgetter(\"question\") | base_retriever, \"question\": itemgetter(\"question\")}\n", " | RunnablePassthrough.assign(context=itemgetter(\"context\"))\n", " | {\"response\": rag_prompt_template | rag_llm | StrOutputParser(), \"context\": itemgetter(\"context\")}\n", ")" ] }, { "cell_type": "code", "execution_count": 46, "metadata": {}, "outputs": [], "source": [ "finetune_rag_chain = (\n", " {\"context\": itemgetter(\"question\") | finetune_retriever, \"question\": itemgetter(\"question\")}\n", " | RunnablePassthrough.assign(context=itemgetter(\"context\"))\n", " | {\"response\": rag_prompt_template | rag_llm | StrOutputParser(), \"context\": itemgetter(\"context\")}\n", ")" ] }, { "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [], "source": [ "from langchain.schema.runnable import RunnablePassthrough\n", "\n", "openai_rag_chain = (\n", " {\"context\": itemgetter(\"question\") | openai_retriever, \"question\": itemgetter(\"question\")}\n", " | RunnablePassthrough.assign(context=itemgetter(\"context\"))\n", " | {\"response\": rag_prompt_template | rag_llm | StrOutputParser(), \"context\": itemgetter(\"context\")}\n", ")" ] }, { "cell_type": "code", "execution_count": 87, "metadata": {}, "outputs": [], "source": [ "\n" ] }, { "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [], "source": [ "from ragas.llms import LangchainLLMWrapper\n", "from ragas.embeddings import LangchainEmbeddingsWrapper\n", "from langchain_openai import ChatOpenAI\n", "from langchain_openai import OpenAIEmbeddings\n", "generator_llm = LangchainLLMWrapper(ChatOpenAI(model=\"gpt-4o\"))\n", "generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())" ] }, { "cell_type": "code", "execution_count": 49, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "4fe18d41fdd74b6fae35ef5380352540", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Applying SummaryExtractor: 0%| | 0/6 [00:00\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
user_inputreference_contextsreferencesynthesizer_name
0What does the Decision-Making Confidence Scale...[Linked Psychological & Physical Assessment\\nP...The Decision-Making Confidence Scale (DMCS-6) ...single_hop_specifc_query_synthesizer
1Wht is the Work-Related Stress Scale and how d...[Linked Psychological & Physical Assessment\\nP...The Work-Related Stress Scale (WRSS-8) evaluat...single_hop_specifc_query_synthesizer
2what cognitive load management scale do, how i...[Financial Stress Index (FSI-6)\\nThe FSI-6 eva...The Cognitive Load Management Scale (CLMS-7) m...single_hop_specifc_query_synthesizer
3What is the purpose of the Emotional Regulatio...[Financial Stress Index (FSI-6)\\nThe FSI-6 eva...The context does not provide specific informat...single_hop_specifc_query_synthesizer
4What does the MRI-6 assess?[The ERI-9 assesses an individual's ability to...The MRI-6 evaluates short-term and long-term m...single_hop_specifc_query_synthesizer
5What does the Social Confidence Measure (SCM-6...[The ERI-9 assesses an individual's ability to...The Social Confidence Measure (SCM-6) evaluate...single_hop_specifc_query_synthesizer
6What OFI-7 do?[Linked Psychological & Physical Assessment\\nC...The OFI-7 assesses work-related exhaustion and...single_hop_specifc_query_synthesizer
7Cud yu pleese explane wut the Chronic Pain Adj...[Linked Psychological & Physical Assessment\\nC...The Chronic Pain Adjustment Index (CPAI-10) ev...single_hop_specifc_query_synthesizer
8What CWT-7 do?[I feel confident when making important decisi...The CWT-7 evaluates an individual's ability to...single_hop_specifc_query_synthesizer
9Cud yu pleese explane how the COGNITIVE Worklo...[I feel confident when making important decisi...The Cognitive Workload Tolerance (CWT-7) evalu...single_hop_specifc_query_synthesizer
\n", "" ], "text/plain": [ " user_input \\\n", "0 What does the Decision-Making Confidence Scale... \n", "1 Wht is the Work-Related Stress Scale and how d... \n", "2 what cognitive load management scale do, how i... \n", "3 What is the purpose of the Emotional Regulatio... \n", "4 What does the MRI-6 assess? \n", "5 What does the Social Confidence Measure (SCM-6... \n", "6 What OFI-7 do? \n", "7 Cud yu pleese explane wut the Chronic Pain Adj... \n", "8 What CWT-7 do? \n", "9 Cud yu pleese explane how the COGNITIVE Worklo... \n", "\n", " reference_contexts \\\n", "0 [Linked Psychological & Physical Assessment\\nP... \n", "1 [Linked Psychological & Physical Assessment\\nP... \n", "2 [Financial Stress Index (FSI-6)\\nThe FSI-6 eva... \n", "3 [Financial Stress Index (FSI-6)\\nThe FSI-6 eva... \n", "4 [The ERI-9 assesses an individual's ability to... \n", "5 [The ERI-9 assesses an individual's ability to... \n", "6 [Linked Psychological & Physical Assessment\\nC... \n", "7 [Linked Psychological & Physical Assessment\\nC... \n", "8 [I feel confident when making important decisi... \n", "9 [I feel confident when making important decisi... \n", "\n", " reference \\\n", "0 The Decision-Making Confidence Scale (DMCS-6) ... \n", "1 The Work-Related Stress Scale (WRSS-8) evaluat... \n", "2 The Cognitive Load Management Scale (CLMS-7) m... \n", "3 The context does not provide specific informat... \n", "4 The MRI-6 evaluates short-term and long-term m... \n", "5 The Social Confidence Measure (SCM-6) evaluate... \n", "6 The OFI-7 assesses work-related exhaustion and... \n", "7 The Chronic Pain Adjustment Index (CPAI-10) ev... \n", "8 The CWT-7 evaluates an individual's ability to... \n", "9 The Cognitive Workload Tolerance (CWT-7) evalu... \n", "\n", " synthesizer_name \n", "0 single_hop_specifc_query_synthesizer \n", "1 single_hop_specifc_query_synthesizer \n", "2 single_hop_specifc_query_synthesizer \n", "3 single_hop_specifc_query_synthesizer \n", "4 single_hop_specifc_query_synthesizer \n", "5 single_hop_specifc_query_synthesizer \n", "6 single_hop_specifc_query_synthesizer \n", "7 single_hop_specifc_query_synthesizer \n", "8 single_hop_specifc_query_synthesizer \n", "9 single_hop_specifc_query_synthesizer " ] }, "execution_count": 50, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset.to_pandas()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Eval with base model" ] }, { "cell_type": "code", "execution_count": 51, "metadata": {}, "outputs": [], "source": [ "for test_row in dataset:\n", " response = base_rag_chain.invoke({\"question\" : test_row.eval_sample.user_input})\n", " test_row.eval_sample.response = response[\"response\"]\n", " test_row.eval_sample.retrieved_contexts = [context.page_content for context in response[\"context\"]]" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [], "source": [ "from ragas.llms import LangchainLLMWrapper\n", "\n", "evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model=\"gpt-4o\"))" ] }, { "cell_type": "code", "execution_count": 53, "metadata": {}, "outputs": [], "source": [ "from ragas import EvaluationDataset\n", "\n", "evaluation_dataset = EvaluationDataset.from_pandas(dataset.to_pandas())" ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a098ed85762d4bbcb3983956c8e4d3e6", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Evaluating: 0%| | 0/60 [00:00