"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#!pip install wandb\n",
"\n",
"import wandb\n",
"wandb.init(mode=\"disabled\")"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [],
"source": [
"# !pip install torch\n",
"# !pip install accelerate>=0.26.0\n",
"# !pip install transformers\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
"#!pip install --upgrade --force-reinstall transformers accelerate torch\n",
"#!which python\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
" \n",
" \n",
"
\n",
" [5/5 00:01, Epoch 5/5]\n",
"
\n",
" \n",
" \n",
" \n",
" Step | \n",
" Training Loss | \n",
" Validation Loss | \n",
" Cosine Accuracy@1 | \n",
" Cosine Accuracy@3 | \n",
" Cosine Accuracy@5 | \n",
" Cosine Accuracy@10 | \n",
" Cosine Precision@1 | \n",
" Cosine Precision@3 | \n",
" Cosine Precision@5 | \n",
" Cosine Precision@10 | \n",
" Cosine Recall@1 | \n",
" Cosine Recall@3 | \n",
" Cosine Recall@5 | \n",
" Cosine Recall@10 | \n",
" Cosine Ndcg@10 | \n",
" Cosine Mrr@10 | \n",
" Cosine Map@100 | \n",
"
\n",
" \n",
" \n",
" \n",
" 1 | \n",
" No log | \n",
" No log | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 0.333333 | \n",
" 0.200000 | \n",
" 0.100000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
"
\n",
" \n",
" 2 | \n",
" No log | \n",
" No log | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 0.333333 | \n",
" 0.200000 | \n",
" 0.100000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
"
\n",
" \n",
" 3 | \n",
" No log | \n",
" No log | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 0.333333 | \n",
" 0.200000 | \n",
" 0.100000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
"
\n",
" \n",
" 4 | \n",
" No log | \n",
" No log | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 0.333333 | \n",
" 0.200000 | \n",
" 0.100000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
"
\n",
" \n",
" 5 | \n",
" No log | \n",
" No log | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 0.333333 | \n",
" 0.200000 | \n",
" 0.100000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
" 1.000000 | \n",
"
\n",
" \n",
"
"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"warmup_steps = int(len(loader) * EPOCHS * 0.1)\n",
"\n",
"model.fit(\n",
" train_objectives=[(loader, train_loss)],\n",
" epochs=EPOCHS,\n",
" warmup_steps=warmup_steps,\n",
" output_path='models/midterm-compare-arctic-embed-m-ft',\n",
" show_progress_bar=True,\n",
" evaluator=evaluator,\n",
" evaluation_steps=50\n",
")"
]
},
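  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check (sketch, not part of the original run): the same `evaluator` passed to `model.fit` can be called directly on the fine-tuned model. Depending on the `sentence-transformers` version, it returns either a dict of metrics or a single primary score."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: re-run the retrieval evaluator on the fine-tuned model.\n",
    "# Assumes `evaluator` and `model` from the cells above; newer\n",
    "# sentence-transformers versions return a dict, older ones a float.\n",
    "results = evaluator(model)\n",
    "print(results)"
   ]
  },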
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "c3832f15349447c59ef0b7950d732a59",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors: 0%| | 0.00/436M [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'https://huggingface.co/drewgenai/midterm-compare-arctic-embed-m-ft/commit/695a90e0d9d4a6ca560a5844c0e5a7cf4c4c74a9'"
]
},
"execution_count": 47,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.push_to_hub(f\"{hf_username}/midterm-compare-arctic-embed-m-ft\")"
]
},
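  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optionally, the pushed model can be reloaded straight from the Hub to confirm the upload worked — a minimal sketch assuming the same `hf_username`; the test sentence is only an illustrative placeholder."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: reload the fine-tuned model from the Hub and embed a test sentence.\n",
    "from sentence_transformers import SentenceTransformer\n",
    "\n",
    "reloaded = SentenceTransformer(f\"{hf_username}/midterm-compare-arctic-embed-m-ft\")\n",
    "test_vec = reloaded.encode(\"How is pain coping measured?\")\n",
    "print(test_vec.shape)  # arctic-embed-m produces 768-dimensional vectors"
   ]
  },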
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5a84694a9cff451581d43a244cbd6ce5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"modules.json: 0%| | 0.00/349 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d9635815ad784cc68833f2b4199c611b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config_sentence_transformers.json: 0%| | 0.00/281 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b425eef83f6c47cf90d9ad8df35bed07",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"README.md: 0%| | 0.00/26.3k [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1c080b01bb4c43e3b0af3da190feff91",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"sentence_bert_config.json: 0%| | 0.00/53.0 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "8ebbd4faaa99434fbd6413f24fadc8b1",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config.json: 0%| | 0.00/675 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5ef43ded862f4e5685af4b66e51922af",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors: 0%| | 0.00/436M [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some weights of BertModel were not initialized from the model checkpoint at drewgenai/midterm-compare-arctic-embed-m-ft and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "f2704b3d8d214414acf54e23efb2de25",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer_config.json: 0%| | 0.00/1.41k [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "70d0aca65df94b8c973d9e2aef700c6b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"vocab.txt: 0%| | 0.00/232k [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b8a288bc2740416d8be044c1534138a0",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.json: 0%| | 0.00/712k [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "fd5494a1b2d2483884ccdfeaaf03e65c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"special_tokens_map.json: 0%| | 0.00/695 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e6259269b65b45358940c42ac8e9d127",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"1_Pooling%2Fconfig.json: 0%| | 0.00/296 [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"finetune_embeddings = HuggingFaceEmbeddings(model_name=f\"{hf_username}/midterm-compare-arctic-embed-m-ft\")"
]
},
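  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A one-line smoke test (sketch) to confirm the LangChain wrapper loads and produces vectors; the query string is an illustrative placeholder."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: embed a sample query through the LangChain wrapper.\n",
    "sample_vec = finetune_embeddings.embed_query(\"patient reported outcome measures\")\n",
    "print(len(sample_vec))  # expect 768 dimensions for arctic-embed-m"
   ]
  },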
{
"cell_type": "markdown",
"metadata": {},
"source": [
"###testingabove"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#!pip install -qU huggingface_hub\n",
"#!pip install -qU ipywidgets\n"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some weights of BertModel were not initialized from the model checkpoint at drewgenai/demo-compare-arctic-embed-m-ft and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
]
}
],
"source": [
"from sentence_transformers import SentenceTransformer\n",
"from langchain.vectorstores import Qdrant\n",
"from langchain.embeddings import HuggingFaceEmbeddings\n",
"\n",
"\n",
"# Load the SentenceTransformer model\n",
"#model_id = \"Snowflake/snowflake-arctic-embed-m\"\n",
"model_id = f\"{hf_username}/demo-compare-arctic-embed-m-ft\" \n",
"embedding_model = HuggingFaceEmbeddings(model_name=model_id)\n",
"# model_id = \"Snowflake/snowflake-arctic-embed-m\"\n",
"# embedding_model = HuggingFaceEmbeddings(model_name=model_id)\n",
"# model_id = \"Snowflake/snowflake-arctic-embed-m-v2.0\"\n",
"# embedding_model = HuggingFaceEmbeddings(model_name=model_id, model_kwargs={\"trust_remote_code\": True})\n",
"\n",
"\n",
"# Load documents into Qdrant\n",
"qdrant_vectorstore = Qdrant.from_documents(\n",
" documents_with_metadata,\n",
" embedding_model,\n",
" location=\":memory:\", # In-memory for testing\n",
" collection_name=\"document_comparison\",\n",
")\n",
"\n",
"# Create a retriever\n",
"qdrant_retriever = qdrant_vectorstore.as_retriever()"
]
},
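  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick retrieval smoke test (sketch) against the in-memory store; `invoke` works on retrievers in recent LangChain versions (older versions use `get_relevant_documents`). The query is an illustrative placeholder."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: retrieve top matches for a sample query and preview them.\n",
    "# Older LangChain versions: use qdrant_retriever.get_relevant_documents(...).\n",
    "hits = qdrant_retriever.invoke(\"pain coping strategies\")\n",
    "for doc in hits:\n",
    "    print(doc.metadata, doc.page_content[:80])"
   ]
  },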
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"RAG_PROMPT = \"\"\"\n",
"CONTEXT:\n",
"{context}\n",
"\n",
"QUERY:\n",
"{question}\n",
"\n",
"You are a helpful assistant. Use the available context to answer the question.\n",
"\n",
"Return the response in **valid JSON format** with the following structure:\n",
"\n",
"[\n",
" {{\n",
" \"Derived Description\": \"A short name for the matched concept\",\n",
" \"Protocol_1_Name\": \"Protocol 1 - Matching Element\",\n",
" \"Protocol_2_Name\": \"Protocol 2 - Matching Element\"\n",
" }},\n",
" ...\n",
"]\n",
"\n",
"### Rules:\n",
"1. Only output **valid JSON** with no explanations, summaries, or markdown formatting.\n",
"2. Ensure each entry in the JSON list represents a single matched data element from the two protocols.\n",
"3. If no matching element is found in a protocol, leave it empty (\"\").\n",
"4. **Do NOT include headers, explanations, or additional formatting**—only return the raw JSON list.\n",
"5. It should include all the elements in the two protocols.\n",
"6. If it cannot match the element, create the row and include the protocol it did find and put \"could not match\" in the other protocol column.\n",
"\"\"\"\n",
"\n",
"rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"#openai_chat_model = ChatOpenAI(model=\"gpt-4o\")\n",
"openai_chat_model = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"from operator import itemgetter\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"\n",
"rag_chain = (\n",
" {\"context\": itemgetter(\"question\") | qdrant_retriever, \"question\": itemgetter(\"question\")}\n",
" | rag_prompt | openai_chat_model | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"question_text = \"\"\"Between these two files containing protocols, can you find the data elements in each that most likely match the element in the other and output a CSV that lists three columns:\n",
"\n",
"The questions within elements will be similar between the two documents and can be used to match the elements.\n",
"\n",
"1. Derived description from the two documents describing the index/measure/scale.\n",
"2. A column for each standard.\n",
"3. In the column for each name/version, the data element used to capture that description.\n",
"\n",
"There should only be one row for each scale/index/etc.\n",
"The description should not be one of the questions but a name that best describes the similar data elements.\"\"\"\n",
"\n",
"response_text = rag_chain.invoke({\"question\": question_text})\n",
"# response = rag_chain.invoke({\"question\": question_text})"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"✅ CSV file saved: matching_data_elements.csv\n"
]
}
],
"source": [
"import json\n",
"import pandas as pd\n",
"\n",
"def parse_rag_output(response_text):\n",
" \"\"\"Extract structured JSON data from the RAG response.\"\"\"\n",
" try:\n",
" structured_data = json.loads(response_text)\n",
"\n",
" # Ensure similarity score is always included\n",
" for item in structured_data:\n",
" item.setdefault(\"Similarity Score\", \"N/A\") # Default if missing\n",
"\n",
" return structured_data\n",
" except json.JSONDecodeError:\n",
" print(\"Error: Response is not valid JSON.\")\n",
" return None\n",
"\n",
"def save_to_csv(data, directory=\"./output\", filename=\"matching_data_elements.csv\"):\n",
" \"\"\"Save structured data to CSV.\"\"\"\n",
" if not data:\n",
" print(\"No data to save.\")\n",
" return\n",
"\n",
" file_path = os.path.join(directory, filename)\n",
" df = pd.DataFrame(data, columns=[\"Derived Description\", \"Protocol_1_Name\", \"Protocol_2_Name\"]) # Ensure correct columns\n",
" df.to_csv(file_path, index=False)\n",
" print(f\"✅ CSV file saved: {filename}\")\n",
"\n",
"# Run the pipeline\n",
"structured_output = parse_rag_output(response_text)\n",
"save_to_csv(structured_output)\n"
]
},
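  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a final check (sketch), the saved CSV can be read back to eyeball the matched rows; this assumes the default `./output` directory used above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: read the generated CSV back to verify its contents.\n",
    "df_check = pd.read_csv(\"./output/matching_data_elements.csv\")\n",
    "print(df_check.head())"
   ]
  },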
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"# rag_chain.invoke({\"question\" : \"Based on the types of questions asked under each heading. can you identify the headings in one document that most closely match the second document. list them e.g paincoping/doc1 painstrategy/doc2\"})"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [],
"source": [
"# rag_chain.invoke({\"question\" : \"Based on the types of questions asked under each heading. can you identify the headings in one document that most closely match the second document. list them e.g paincoping/doc1 painstrategy/doc2. these are example headings not the ones in the actual documents. just list the matches not the rational. Can you list multiple matches?\"})"
]
  }
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}