diff --git a/.DS_Store b/.DS_Store
deleted file mode 100644
index 8919d225f87d2240d51e2e5c254fb7c8cfa6d673..0000000000000000000000000000000000000000
Binary files a/.DS_Store and /dev/null differ
diff --git a/all_sources_contextual_nodes.pkl b/all_sources_contextual_nodes.pkl
deleted file mode 100644
index f9cd388550ad50878de8d5e5f6307c430121b9a5..0000000000000000000000000000000000000000
--- a/all_sources_contextual_nodes.pkl
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a3fb01a8d69f1af5b8c9d863a0a40d109f812696a43a6ce2a3b420458be4bc49
-size 112785806
diff --git a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/data_level0.bin b/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/data_level0.bin
deleted file mode 100644
index 6ba5ef404e920c98721320621e695d1b7a4933a4..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/data_level0.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e8b876ac2e179211d41424ec19f40153fa118545766dee59f9753a73b04f350f
-size 135552000
diff --git a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/header.bin b/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/header.bin
deleted file mode 100644
index 4b20c1f245ff745c69798d1c2013ee0d3628ec2c..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/header.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:595a4f1b655e01205b66b5f692d44a48da44acf9a2ad5155a223d082d235bae3
-size 100
diff --git a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/index_metadata.pickle b/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/index_metadata.pickle
deleted file mode 100644
index aa4e079bae719d66c7dbe23c07560e4454cce0ff..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/index_metadata.pickle
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ef4d9945fed0d6ed7a0b8a619af49e5c8524c1a61e2a61c5dfd554e6af9ffb3d
-size 1854390
diff --git a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/length.bin b/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/length.bin
deleted file mode 100644
index ce581a65b292ffc109322e99f9d0bdeaf9a9ee75..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/length.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6763f97c8f35167fe68a1f7b8b22a14a3e148614522e9df6ad82ac65c24c4cb0
-size 128000
diff --git a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/link_lists.bin b/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/link_lists.bin
deleted file mode 100644
index 5a55b47ebff075542069e1266b6a0d3e57841009..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/link_lists.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fdfa1a217488e83bd57b7d13985ce0f4379f577eec289316b442b3ebe7dbb79f
-size 277872
diff --git a/chroma-db-all_sources/chroma.sqlite3 b/chroma-db-all_sources/chroma.sqlite3
deleted file mode 100644
index da6c5cf92febc6443c13c19608fca0fc7ca61737..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/chroma.sqlite3
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f8bc923548681b7c546e6e45d6dfd796876b67066118cf3fe6b22cfeca1524b2
-size 947904512
diff --git a/chroma-db-all_sources/document_dict_all_sources.pkl b/chroma-db-all_sources/document_dict_all_sources.pkl
deleted file mode 100644
index d22dcb964e4a67b91abffdae6d780a2e734983c9..0000000000000000000000000000000000000000
--- a/chroma-db-all_sources/document_dict_all_sources.pkl
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4bee37cedea3099ac8b65a74904688d771f624c97097f0c67e65b163b3967b22
-size 87955987
diff --git a/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/data_level0.bin b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..742502d1fc1eb15cbc0055ce1791bc2da7d14c18
--- /dev/null
+++ b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7b31772d7b492860c7d8a5bf5009e837e4210f36db02b577d200213ec74a1c6
+size 74568000
diff --git a/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/header.bin b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1ad250315e692143e9f811c0360c99e59688fba1
--- /dev/null
+++ b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb0d9006c0a810bed3cf70ce96081931f4ca52fba11d05376a99d4e432d9d994
+size 100
diff --git a/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/index_metadata.pickle b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/index_metadata.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..089b60c174eafc7322c35132c34a883d27574f64
--- /dev/null
+++ b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/index_metadata.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43b1ec3c7d4b11231e551e43c43dc6f8c6cbf3221517f7ed1e54afd70f6e08a0
+size 346117
diff --git a/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/length.bin b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4465780e718f5ace12641eb64a7f6e34d072cbc4
--- /dev/null
+++ b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b30b7d36428adb2def6746197d2a25c90b0dc6c7e0bcfd6216bfdc81dc6ad98
+size 24000
diff --git a/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/link_lists.bin b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ce9e4f16d1fdddbd1701dcdcd876a62d4a3baed0
--- /dev/null
+++ b/chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/link_lists.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83cab85fce66e7f40c7b93609e7b34d9970f8dd7fb0ec8ed3ca9691f7d515b84
+size 52220
diff --git a/chroma-db-langchain/chroma.sqlite3 b/chroma-db-langchain/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..f68a0c47afe98db21e0984dc2bd943f9e758b3a4
--- /dev/null
+++ b/chroma-db-langchain/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fb702f4ed770cf0f0630d4d9c999de16409e95f0708cc6d4bc41f9b6758e0c0
+size 223997952
diff --git a/chroma-db-langchain/document_dict_langchain.pkl b/chroma-db-langchain/document_dict_langchain.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..e7837a582dcec14c6d535eb11c9c4c8ca4e8b833
--- /dev/null
+++ b/chroma-db-langchain/document_dict_langchain.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9288ead475c396868d4046b709ba6b4704b469dc10d571d61e2ac4a651dc8360
+size 9495017
diff --git a/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/data_level0.bin b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d0c800f36d10e1b231b6da284f9fa1777f6bc8cc
--- /dev/null
+++ b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff775114aa9ea2874506dc5fb42fb0cb40c8aba1d39a5ccc40c0d3e01fc617fe
+size 74568000
diff --git a/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/header.bin b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b916f30a920cf40aee5ef2aa7d5ca71722a084c9
--- /dev/null
+++ b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6485506af204d2b936b1f28bc63bcc7b791d4b431bc168bdfef9290d9059fe73
+size 100
diff --git a/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/index_metadata.pickle b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/index_metadata.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..211c771e49eb054eb47466ccbfa95db8e81faa09
--- /dev/null
+++ b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/index_metadata.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e4a0ed52b4a277d65769ca116592388b62dc31871eabdb3504d84c656914321
+size 346117
diff --git a/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/length.bin b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..38eaecda6c36ea8cd40f0f12f95631ff34304cda
--- /dev/null
+++ b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec5cf06bc6bc8d7e43df5ec03e41faaf11b25a94375e31500d22aad8d9b19b3
+size 24000
diff --git a/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/link_lists.bin b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4aaa3be84f66a3cb6bca5dd5cd2197809db2a438
--- /dev/null
+++ b/chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/link_lists.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e9702fd3fab5b9e0a9b3495b9051dcbec394bf49a94c84503c00f2c59468e2c
+size 52152
diff --git a/chroma-db-llama_index/chroma.sqlite3 b/chroma-db-llama_index/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..5fe110587c41e8ee99dcfcb64e38d093f780ac9c
--- /dev/null
+++ b/chroma-db-llama_index/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f9f841426404c5901ac2f13ffc1c7224cb2783cf4e0276ffbc5783f1426cb29
+size 205246464
diff --git a/chroma-db-llama_index/document_dict_llama_index.pkl b/chroma-db-llama_index/document_dict_llama_index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..378a38b3c5510309629003cd47241f4916950d33
--- /dev/null
+++ b/chroma-db-llama_index/document_dict_llama_index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5153123cbc6d2e83c1d6b60d23dd5afee00bd9a4967143b9e8f30f1792c5e932
+size 8954720
diff --git a/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/data_level0.bin b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fc3262315bd5d0cf4f4eae12837fd34a64c325b1
--- /dev/null
+++ b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93df5543a015938eddd1f7c3cf53c35de01709be02c54836a47a1b445a39941c
+size 24856000
diff --git a/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/header.bin b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fc17515de8826e1f9df61cd05bd2d37fca16d77e
--- /dev/null
+++ b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5adccd0a9e9b2c539168b73e6cc6ce867211ec92bdfbe077126f0620285ad69d
+size 100
diff --git a/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/index_metadata.pickle b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/index_metadata.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..da4ece59d9da22a98dd3245e5203e34bd98bcb93
--- /dev/null
+++ b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/index_metadata.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dbbac2bbab35c9a2282f4d5edba22b4da61d44a03cd94a6dfed3a957ec84603
+size 114057
diff --git a/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/length.bin b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..9d9a31cadde53c3950b0b517d18f327c87cd2226
--- /dev/null
+++ b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:448e2b40f4fab352a4c0c747a4c13d28a753e48374f6f96c9cfbd8f153ea30f9
+size 8000
diff --git a/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/link_lists.bin b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..191ce0b20d2b1c0d9cc99538920c5257d1532d91
--- /dev/null
+++ b/chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/link_lists.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eea6193ad5b15addc24b2bb8a6381d88976c55d7e8729fc78db5ab9909f782c8
+size 17316
diff --git a/chroma-db-openai_cookbooks/chroma.sqlite3 b/chroma-db-openai_cookbooks/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..a82cfcc2a079937723d418734655419ad35bca25
--- /dev/null
+++ b/chroma-db-openai_cookbooks/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f306cdfca7f7d2899118eb12fb1fc1b7393ebf80f70e358340fe9d6ea87e33e0
+size 83746816
diff --git a/chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl b/chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..3ef5f5d32325be6439145308155de936fa1858d1
--- /dev/null
+++ b/chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3c410a0972b9c2dfa286ad03449d8024704fbcb0b313f126ae10dd1d7b94f21
+size 3490619
diff --git a/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/data_level0.bin b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..83c40da769af21f896cd756b10e0db50f5b7bba4
--- /dev/null
+++ b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b8d4b3825a7c7a773e22fa3eeef0e7d15a695f5c4183aeff5beb07741a68679
+size 12428000
diff --git a/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/header.bin b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..162c71457f2bf981c3af5bd31bebf392ae468e14
--- /dev/null
+++ b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8a3ec48846fc6fdfaef19f5ed2508f0bf3da4a3c93b0f6b3dd21f0a22ec1026
+size 100
diff --git a/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/length.bin b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2915accb6a0d21bcb2ca0b8087696280a3772435
--- /dev/null
+++ b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c65d4f4f981c64a2613d4b82d32fcf22dca9ebfa2cfaffdd4e12e54e890a1d1
+size 4000
diff --git a/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/link_lists.bin b/chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/chroma-db-peft/chroma.sqlite3 b/chroma-db-peft/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..87cbe1eda9b0c48e3a76c4b71307fa5e0f654d81
--- /dev/null
+++ b/chroma-db-peft/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0321f854c294da9564e7e90ccb11b3190bd3d900d4606250fb1ccbaabd83be
+size 5226496
diff --git a/chroma-db-peft/document_dict_peft.pkl b/chroma-db-peft/document_dict_peft.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..79e341aebaa75c793c7a3d4e94354ee1535b06ed
--- /dev/null
+++ b/chroma-db-peft/document_dict_peft.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ea3f661fbc9d85496d6cf77a09cb545998b1f0ebe4a8fb91865444dbfcffae
+size 260392
diff --git a/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/data_level0.bin b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..99a6bde885a40d464d600c342419a8a5dfa03502
--- /dev/null
+++ b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71dacc58c9b86fda98eba379dabcb91f62ed3a10d381647faa10d0e43889ff4f
+size 12428000
diff --git a/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/header.bin b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..17f5d0f10a25bea321dda3cf2a655383cae45c1f
--- /dev/null
+++ b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db3337c9290bd8362d7849233bb2ce47b0b5a48d1790b5db251bd3ecb56a8fd4
+size 100
diff --git a/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/index_metadata.pickle b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/index_metadata.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..83cf350bd7d04d707377767ffae60abd6b50fe42
--- /dev/null
+++ b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/index_metadata.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f2ff58c1cdba77e74cbd707994a2cceaddcb890e9f11ae38a6b1fae30af5e4e
+size 56042
diff --git a/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/length.bin b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fb155d822b6ca4351e2925bbc2dba0f49f05e0ab
--- /dev/null
+++ b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc19b1997119425765295aeab72d76faa6927d4f83985d328c26f20468d6cc76
+size 4000
diff --git a/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/link_lists.bin b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..84cf0e8546384807ecfde0f7c29870bbc5a58ef2
--- /dev/null
+++ b/chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/link_lists.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8508e40fb725d5c517c803a091d55067abf0479de7fb605d36cdcfaa454a4eb
+size 8148
diff --git a/chroma-db-transformers/chroma.sqlite3 b/chroma-db-transformers/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..ce956a7dc607128db2b672dd7028858a4ea02f15
--- /dev/null
+++ b/chroma-db-transformers/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ece68c3b8a7c87f4b630e542127601194dcc4d97ac9d2a236938b575e33ae6
+size 63442944
diff --git a/chroma-db-transformers/document_dict_transformers.pkl b/chroma-db-transformers/document_dict_transformers.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..35d603880e1bff22c73231825adbab940ff58250
--- /dev/null
+++ b/chroma-db-transformers/document_dict_transformers.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0eb21c73e0cf7ef0970b615c66df67dae4e973befea9fdd22721dd69b0939231
+size 3166114
diff --git a/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/data_level0.bin b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..83c40da769af21f896cd756b10e0db50f5b7bba4
--- /dev/null
+++ b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/data_level0.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b8d4b3825a7c7a773e22fa3eeef0e7d15a695f5c4183aeff5beb07741a68679
+size 12428000
diff --git a/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/header.bin b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..162c71457f2bf981c3af5bd31bebf392ae468e14
--- /dev/null
+++ b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/header.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8a3ec48846fc6fdfaef19f5ed2508f0bf3da4a3c93b0f6b3dd21f0a22ec1026
+size 100
diff --git a/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/length.bin b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..68a515e57d322190b05454b9a97d2d285229197c
--- /dev/null
+++ b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/length.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b8d924c7d1367cea6fd3c8fa8df0be395d4f62bf898bf07df588aa3140d7b61
+size 4000
diff --git a/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/link_lists.bin b/chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/link_lists.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/chroma-db-trl/chroma.sqlite3 b/chroma-db-trl/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..e927fdfc095ff3d14f61c3a2bb42a8e07d961d6d
--- /dev/null
+++ b/chroma-db-trl/chroma.sqlite3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:066e040cdf71203d2d2f90a870854e9fd16418d5dc1229fccf52d2887cec1c5c
+size 5292032
diff --git a/chroma-db-trl/document_dict_trl.pkl b/chroma-db-trl/document_dict_trl.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..40a7866557a5dc77372c3d13a4e375e26d2937af
--- /dev/null
+++ b/chroma-db-trl/document_dict_trl.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea2cf47511ef464f04c458d950cae2ac158a10cca40a338bacb9e38351223375
+size 264000
diff --git a/langchain_md_files/_templates/integration.mdx b/langchain_md_files/_templates/integration.mdx
deleted file mode 100644
index 5e686ad3fc1224b148363a4dbd9a3ac8f133fb11..0000000000000000000000000000000000000000
--- a/langchain_md_files/_templates/integration.mdx
+++ /dev/null
@@ -1,60 +0,0 @@
-[comment: Please see "docs/integrations/arxiv.md" for a reference example.]::
-[comment: Use this template to create a new .md file in "docs/integrations/"]::
-
-# Title_REPLACE_ME
-
-[comment: Only one Title/H1 is allowed!]::
-
->
-[comment: Description: After reading this description, a reader should be able to decide whether this integration is worth trying, or]::
-[comment: whether to move on to the next integration doc.]::
-[comment: The description should include a link to the source for further reading.]::
-
-## Installation and Setup
-
-[comment: Installation and Setup: All additional package installations and setup steps (API tokens, etc.) needed for this integration]::
-
-```bash
-pip install package_name_REPLACE_ME
-```
-
-[comment: OR this text:]::
-
-There isn't any special setup for it.
-
-[comment: The next H2/## sections with names of the integration modules, like "LLM", "Text Embedding Models", etc]::
-[comment: see "Modules" in the "index.html" page]::
-[comment: Each H2 section should include a link to an example(s) and a Python code with the import of the integration class]::
-[comment: Below are several example sections. Remove all unnecessary sections. Add all necessary sections not provided here.]::
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/INCLUDE_REAL_NAME).
-
-```python
-from langchain_community.llms import integration_class_REPLACE_ME
-```
-
-## Text Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME).
-
-```python
-from langchain_community.embeddings import integration_class_REPLACE_ME
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME).
-
-```python
-from langchain_community.chat_models import integration_class_REPLACE_ME
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/INCLUDE_REAL_NAME).
-
-```python
-from langchain_community.document_loaders import integration_class_REPLACE_ME
-```
diff --git a/langchain_md_files/additional_resources/arxiv_references.mdx b/langchain_md_files/additional_resources/arxiv_references.mdx
deleted file mode 100644
index fb843aa0c3cdca923a7f85e5a3838f2c38f17b9b..0000000000000000000000000000000000000000
--- a/langchain_md_files/additional_resources/arxiv_references.mdx
+++ /dev/null
@@ -1,1101 +0,0 @@
-# arXiv
-            
-LangChain implements the latest research in the field of Natural Language Processing.
-This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
- Templates, and Cookbooks.
-
-From the opposite direction, scientists use `LangChain` in research and reference it in the research papers. 
-
-`arXiv` papers with references to:
- [LangChain](https://arxiv.org/search/?query=langchain&searchtype=all&source=header) | [LangGraph](https://arxiv.org/search/?query=langgraph&searchtype=all&source=header) | [LangSmith](https://arxiv.org/search/?query=langsmith&searchtype=all&source=header)
-
-## Summary
-
-| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
-|------------------|---------|-------------------|------------------------|
-| `2403.14403v2` [Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity](http://arxiv.org/abs/2403.14403v2) | Soyeong Jeong, Jinheon Baek, Sukmin Cho,  et al. | 2024‑03‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren,  et al. | 2024‑02‑06 | `Cookbook:` [Self-Discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
-| `2402.03367v2` [RAG-Fusion: a New Take on Retrieval-Augmented Generation](http://arxiv.org/abs/2402.03367v2) | Zackary Rackauckas | 2024‑01‑31 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli,  et al. | 2024‑01‑31 | `Cookbook:` [Raptor](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
-| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu,  et al. | 2024‑01‑29 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
-| `2401.08500v1` [Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering](http://arxiv.org/abs/2401.08500v1) | Tal Ridnik, Dedy Kredo, Itamar Friedman | 2024‑01‑16 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux,  et al. | 2024‑01‑08 | `Cookbook:` [Together Ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
-| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen,  et al. | 2023‑12‑11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
-| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan,  et al. | 2023‑11‑15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
-| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang,  et al. | 2023‑10‑17 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Self Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
-| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen,  et al. | 2023‑10‑09 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [Stepback-Qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
-| `2307.15337v3` [Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation](http://arxiv.org/abs/2307.15337v3) | Xuefei Ning, Zinan Lin, Zixuan Zhou,  et al. | 2023‑07‑28 | `Template:` [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
-| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone,  et al. | 2023‑07‑18 | `Cookbook:` [Semi Structured Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
-| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt,  et al. | 2023‑07‑06 | `Docs:` [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
-| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He,  et al. | 2023‑05‑23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [Rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
-| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023‑05‑15 | `API:` [langchain_experimental.tot](https://python.langchain.com/api_reference/experimental/tot.html), `Cookbook:` [Tree Of Thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
-| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan,  et al. | 2023‑05‑06 | `Cookbook:` [Plan And Execute Agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
-| `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep,  et al. | 2023‑05‑03 | `Docs:` [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression), `API:` [langchain...LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#)
-| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu,  et al. | 2023‑04‑17 | `Cookbook:` [Semi Structured Multi Modal Rag Llama2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi Structured And Multi Modal Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
-| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai,  et al. | 2023‑04‑07 | `Cookbook:` [Generative Agents Interactive Simulacra Of Human Behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [Multiagent Bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
-| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani,  et al. | 2023‑03‑31 | `Cookbook:` [Camel Role Playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
-| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan,  et al. | 2023‑03‑30 | `API:` [langchain_experimental.autonomous_agents](https://python.langchain.com/api_reference/experimental/autonomous_agents.html), `Cookbook:` [Hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
-| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen,  et al. | 2023‑01‑24 | `API:` [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin,  et al. | 2022‑12‑20 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [Hypothetical Document Embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
-| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu,  et al. | 2022‑12‑15 | `Docs:` [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
-| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande,  et al. | 2022‑12‑12 | `API:` [langchain_experimental.fallacy_removal](https://python.langchain.com/api_reference/experimental/fallacy_removal.html)
-| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz,  et al. | 2022‑11‑25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
-| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou,  et al. | 2022‑11‑18 | `API:` [langchain_experimental.pal_chain](https://python.langchain.com/api_reference/experimental/pal_chain.html), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [Program Aided Language Model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
-| `2210.11934v2` [An Analysis of Fusion Functions for Hybrid Retrieval](http://arxiv.org/abs/2210.11934v2) | Sebastian Bruch, Siyu Gai, Amir Ingber | 2022‑10‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu,  et al. | 2022‑10‑06 | `Docs:` [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
-| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan,  et al. | 2022‑09‑22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
-| `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege,  et al. | 2022‑05‑26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
-| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022‑05‑25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
-| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022‑03‑15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
-| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher,  et al. | 2022‑02‑01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon,  et al. | 2021‑12‑02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
-| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy,  et al. | 2021‑02‑26 | `API:` [langchain_experimental.open_clip](https://python.langchain.com/api_reference/experimental/open_clip.html)
-| `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder,  et al. | 2020‑05‑28 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus,  et al. | 2020‑05‑22 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
-| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney,  et al. | 2019‑09‑11 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
-## Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity
-
-- **Authors:** Soyeong Jeong, Jinheon Baek, Sukmin Cho,  et al.
-- **arXiv id:** [2403.14403v2](http://arxiv.org/abs/2403.14403v2)  **Published Date:** 2024-03-21
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** Retrieval-Augmented Large Language Models (LLMs), which incorporate the
-non-parametric knowledge from external knowledge bases into LLMs, have emerged
-as a promising approach to enhancing response accuracy in several tasks, such
-as Question-Answering (QA). However, even though there are various approaches
-dealing with queries of different complexities, they either handle simple
-queries with unnecessary computational overhead or fail to adequately address
-complex multi-step queries; yet, not all user requests fall into only one of
-the simple or complex categories. In this work, we propose a novel adaptive QA
-framework, that can dynamically select the most suitable strategy for
-(retrieval-augmented) LLMs from the simplest to the most sophisticated ones
-based on the query complexity. Also, this selection process is operationalized
-with a classifier, which is a smaller LM trained to predict the complexity
-level of incoming queries with automatically collected labels, obtained from
-actual predicted outcomes of models and inherent inductive biases in datasets.
-This approach offers a balanced strategy, seamlessly adapting between the
-iterative and single-step retrieval-augmented LLMs, as well as the no-retrieval
-methods, in response to a range of query complexities. We validate our model on
-a set of open-domain QA datasets, covering multiple query complexities, and
-show that ours enhances the overall efficiency and accuracy of QA systems,
-compared to relevant baselines including the adaptive retrieval approaches.
-Code is available at: https://github.com/starsuzi/Adaptive-RAG.
-                
-## Self-Discover: Large Language Models Self-Compose Reasoning Structures
-
-- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren,  et al.
-- **arXiv id:** [2402.03620v1](http://arxiv.org/abs/2402.03620v1)  **Published Date:** 2024-02-06
-- **LangChain:**
-
-   - **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
-
-**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the
-task-intrinsic reasoning structures to tackle complex reasoning problems that
-are challenging for typical prompting methods. Core to the framework is a
-self-discovery process where LLMs select multiple atomic reasoning modules such
-as critical thinking and step-by-step thinking, and compose them into an
-explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER
-substantially improves GPT-4 and PaLM 2's performance on challenging reasoning
-benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as
-much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER
-outperforms inference-intensive methods such as CoT-Self-Consistency by more
-than 20%, while requiring 10-40x fewer inference compute. Finally, we show that
-the self-discovered reasoning structures are universally applicable across
-model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share
-commonalities with human reasoning patterns.
-                
-## RAG-Fusion: a New Take on Retrieval-Augmented Generation
-
-- **Authors:** Zackary Rackauckas
-- **arXiv id:** [2402.03367v2](http://arxiv.org/abs/2402.03367v2)  **Published Date:** 2024-01-31
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** Infineon has identified a need for engineers, account managers, and customers
-to rapidly obtain product information. This problem is traditionally addressed
-with retrieval-augmented generation (RAG) chatbots, but in this study, I
-evaluated the use of the newly popularized RAG-Fusion method. RAG-Fusion
-combines RAG and reciprocal rank fusion (RRF) by generating multiple queries,
-reranking them with reciprocal scores and fusing the documents and scores.
-Through manually evaluating answers on accuracy, relevance, and
-comprehensiveness, I found that RAG-Fusion was able to provide accurate and
-comprehensive answers due to the generated queries contextualizing the original
-query from various perspectives. However, some answers strayed off topic when
-the generated queries' relevance to the original query is insufficient. This
-research marks significant progress in artificial intelligence (AI) and natural
-language processing (NLP) applications and demonstrates transformations in a
-global and multi-industry context.
-                
-## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
-
-- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli,  et al.
-- **arXiv id:** [2401.18059v1](http://arxiv.org/abs/2401.18059v1)  **Published Date:** 2024-01-31
-- **LangChain:**
-
-   - **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
-
-**Abstract:** Retrieval-augmented language models can better adapt to changes in world
-state and incorporate long-tail knowledge. However, most existing methods
-retrieve only short contiguous chunks from a retrieval corpus, limiting
-holistic understanding of the overall document context. We introduce the novel
-approach of recursively embedding, clustering, and summarizing chunks of text,
-constructing a tree with differing levels of summarization from the bottom up.
-At inference time, our RAPTOR model retrieves from this tree, integrating
-information across lengthy documents at different levels of abstraction.
-Controlled experiments show that retrieval with recursive summaries offers
-significant improvements over traditional retrieval-augmented LMs on several
-tasks. On question-answering tasks that involve complex, multi-step reasoning,
-we show state-of-the-art results; for example, by coupling RAPTOR retrieval
-with the use of GPT-4, we can improve the best performance on the QuALITY
-benchmark by 20% in absolute accuracy.
-                
-## Corrective Retrieval Augmented Generation
-
-- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu,  et al.
-- **arXiv id:** [2401.15884v2](http://arxiv.org/abs/2401.15884v2)  **Published Date:** 2024-01-29
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-   - **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
-
-**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the
-accuracy of generated texts cannot be secured solely by the parametric
-knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a
-practicable complement to LLMs, it relies heavily on the relevance of retrieved
-documents, raising concerns about how the model behaves if retrieval goes
-wrong. To this end, we propose the Corrective Retrieval Augmented Generation
-(CRAG) to improve the robustness of generation. Specifically, a lightweight
-retrieval evaluator is designed to assess the overall quality of retrieved
-documents for a query, returning a confidence degree based on which different
-knowledge retrieval actions can be triggered. Since retrieval from static and
-limited corpora can only return sub-optimal documents, large-scale web searches
-are utilized as an extension for augmenting the retrieval results. Besides, a
-decompose-then-recompose algorithm is designed for retrieved documents to
-selectively focus on key information and filter out irrelevant information in
-them. CRAG is plug-and-play and can be seamlessly coupled with various
-RAG-based approaches. Experiments on four datasets covering short- and
-long-form generation tasks show that CRAG can significantly improve the
-performance of RAG-based approaches.
-                
-## Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering
-
-- **Authors:** Tal Ridnik, Dedy Kredo, Itamar Friedman
-- **arXiv id:** [2401.08500v1](http://arxiv.org/abs/2401.08500v1)  **Published Date:** 2024-01-16
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** Code generation problems differ from common natural language problems - they
-require matching the exact syntax of the target language, identifying happy
-paths and edge cases, paying attention to numerous small details in the problem
-spec, and addressing other code-specific issues and requirements. Hence, many
-of the optimizations and tricks that have been successful in natural language
-generation may not be effective for code tasks. In this work, we propose a new
-approach to code generation by LLMs, which we call AlphaCodium - a test-based,
-multi-stage, code-oriented iterative flow, that improves the performances of
-LLMs on code problems. We tested AlphaCodium on a challenging code generation
-dataset called CodeContests, which includes competitive programming problems
-from platforms such as Codeforces. The proposed flow consistently and
-significantly improves results. On the validation set, for example, GPT-4
-accuracy (pass@5) increased from 19% with a single well-designed direct prompt
-to 44% with the AlphaCodium flow. Many of the principles and best practices
-acquired in this work, we believe, are broadly applicable to general code
-generation tasks. Full implementation is available at:
-https://github.com/Codium-ai/AlphaCodium
-                
-## Mixtral of Experts
-
-- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux,  et al.
-- **arXiv id:** [2401.04088v1](http://arxiv.org/abs/2401.04088v1)  **Published Date:** 2024-01-08
-- **LangChain:**
-
-   - **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
-
-**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model.
-Mixtral has the same architecture as Mistral 7B, with the difference that each
-layer is composed of 8 feedforward blocks (i.e. experts). For every token, at
-each layer, a router network selects two experts to process the current state
-and combine their outputs. Even though each token only sees two experts, the
-selected experts can be different at each timestep. As a result, each token has
-access to 47B parameters, but only uses 13B active parameters during inference.
-Mixtral was trained with a context size of 32k tokens and it outperforms or
-matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular,
-Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and
-multilingual benchmarks. We also provide a model fine-tuned to follow
-instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo,
-Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both
-the base and instruct models are released under the Apache 2.0 license.
-                
-## Dense X Retrieval: What Retrieval Granularity Should We Use?
-
-- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen,  et al.
-- **arXiv id:** [2312.06648v2](http://arxiv.org/abs/2312.06648v2)  **Published Date:** 2023-12-11
-- **LangChain:**
-
-   - **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
-
-**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or
-world knowledge in open-domain NLP tasks. When we use a learned dense retriever
-on a retrieval corpus at inference time, an often-overlooked design choice is
-the retrieval unit in which the corpus is indexed, e.g. document, passage, or
-sentence. We discover that the retrieval unit choice significantly impacts the
-performance of both retrieval and downstream tasks. Distinct from the typical
-approach of using passages or sentences, we introduce a novel retrieval unit,
-proposition, for dense retrieval. Propositions are defined as atomic
-expressions within text, each encapsulating a distinct factoid and presented in
-a concise, self-contained natural language format. We conduct an empirical
-comparison of different retrieval granularity. Our results reveal that
-proposition-based retrieval significantly outperforms traditional passage or
-sentence-based methods in dense retrieval. Moreover, retrieval by proposition
-also enhances the performance of downstream QA tasks, since the retrieved texts
-are more condensed with question-relevant information, reducing the need for
-lengthy input tokens and minimizing the inclusion of extraneous, irrelevant
-information.
-                
-## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
-
-- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan,  et al.
-- **arXiv id:** [2311.09210v1](http://arxiv.org/abs/2311.09210v1)  **Published Date:** 2023-11-15
-- **LangChain:**
-
-   - **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
-
-**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial
-advancement in the capabilities of large language models, notably in reducing
-factual hallucination by leveraging external knowledge sources. However, the
-reliability of the retrieved information is not always guaranteed. The
-retrieval of irrelevant data can lead to misguided responses, potentially
-causing the model to overlook its inherent knowledge, even when it possesses
-adequate information to address the query. Moreover, standard RALMs often
-struggle to assess whether they possess adequate knowledge, both intrinsic and
-retrieved, to provide an accurate answer. In situations where knowledge is
-lacking, these systems should ideally respond with "unknown" when the answer is
-unattainable. In response to these challenges, we introduce Chain-of-Noting
-(CoN), a novel approach aimed at improving the robustness of RALMs in facing
-noisy, irrelevant documents and in handling unknown scenarios. The core idea of
-CoN is to generate sequential reading notes for retrieved documents, enabling a
-thorough evaluation of their relevance to the given question and integrating
-this information to formulate the final answer. We employed ChatGPT to create
-training data for CoN, which was subsequently used to train an LLaMa-2 7B model.
-Our experiments across four open-domain QA benchmarks show that RALMs equipped
-with CoN significantly outperform standard RALMs. Notably, CoN achieves an
-average improvement of +7.9 in EM score given entirely noisy retrieved
-documents and +10.5 in rejection rates for real-time questions that fall
-outside the pre-training knowledge scope.
-                
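-The chain-of-note-wiki template above packages this pattern; the control flow
-itself is easy to sketch. Below is a rough, illustrative outline (not the
-paper's trained model): `llm` is assumed to be any callable mapping a prompt
-string to a completion string, and `docs` a list of retrieved passages.
-
-```python
-def chain_of_note_answer(question: str, docs: list[str], llm) -> str:
-    """Write a short reading note per retrieved passage, then answer from the notes."""
-    notes = []
-    for i, passage in enumerate(docs, start=1):
-        notes.append(llm(
-            f"Question: {question}\n"
-            f"Passage {i}: {passage}\n"
-            "Note whether this passage is relevant and what, if anything, "
-            "it says about the question."
-        ))
-    return llm(
-        "Using only the reading notes below, answer the question. "
-        "If the notes show the needed information is missing, answer 'unknown'.\n\n"
-        + "\n".join(notes)
-        + f"\n\nQuestion: {question}"
-    )
-```
-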
-## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
-
-- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang,  et al.
-- **arXiv id:** [2310.11511v1](http://arxiv.org/abs/2310.11511v1)  **Published Date:** 2023-10-17
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-   - **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
-
-**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often
-produce responses containing factual inaccuracies due to their sole reliance on
-the parametric knowledge they encapsulate. Retrieval-Augmented Generation
-(RAG), an ad hoc approach that augments LMs with retrieval of relevant
-knowledge, decreases such issues. However, indiscriminately retrieving and
-incorporating a fixed number of retrieved passages, regardless of whether
-retrieval is necessary, or passages are relevant, diminishes LM versatility or
-can lead to unhelpful response generation. We introduce a new framework called
-Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's
-quality and factuality through retrieval and self-reflection. Our framework
-trains a single arbitrary LM that adaptively retrieves passages on-demand, and
-generates and reflects on retrieved passages and its own generations using
-special tokens, called reflection tokens. Generating reflection tokens makes
-the LM controllable during the inference phase, enabling it to tailor its
-behavior to diverse task requirements. Experiments show that Self-RAG (7B and
-13B parameters) significantly outperforms state-of-the-art LLMs and
-retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG
-outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA,
-reasoning and fact verification tasks, and it shows significant gains in
-improving factuality and citation accuracy for long-form generations relative
-to these models.
-                
-## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
-
-- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen,  et al.
-- **arXiv id:** [2310.06117v2](http://arxiv.org/abs/2310.06117v2)  **Published Date:** 2023-10-09
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-   - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
-   - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
-
-**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables
-LLMs to do abstractions to derive high-level concepts and first principles from
-instances containing specific details. Using the concepts and principles to
-guide reasoning, LLMs significantly improve their abilities in following a
-correct reasoning path towards the solution. We conduct experiments of
-Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe
-substantial performance gains on various challenging reasoning-intensive tasks
-including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back
-Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7%
-and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.
-                
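-The stepback-qa template and cookbook above implement this in LangChain; the
-control flow itself is small. A rough sketch, where `llm` (prompt to text) and
-`retrieve` (query to list of context strings) are assumed placeholder callables
-rather than LangChain objects:
-
-```python
-def step_back_answer(question: str, llm, retrieve) -> str:
-    """Ask a more abstract 'step-back' question first, then answer with both contexts."""
-    step_back_question = llm(
-        "Rewrite the question below as a more generic question about the "
-        f"underlying concepts or principles:\n{question}"
-    )
-    context = retrieve(question) + retrieve(step_back_question)
-    return llm(
-        "Answer the question using the context, reasoning from general principles "
-        "down to the specific case.\n\nContext:\n"
-        + "\n".join(context)
-        + f"\n\nQuestion: {question}"
-    )
-```
-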
-## Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation
-
-- **Authors:** Xuefei Ning, Zinan Lin, Zixuan Zhou,  et al.
-- **arXiv id:** [2307.15337v3](http://arxiv.org/abs/2307.15337v3)  **Published Date:** 2023-07-28
-- **LangChain:**
-
-   - **Template:** [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
-
-**Abstract:** This work aims at decreasing the end-to-end generation latency of large
-language models (LLMs). One of the major causes of the high generation latency
-is the sequential decoding approach adopted by almost all state-of-the-art
-LLMs. In this work, motivated by the thinking and writing process of humans, we
-propose Skeleton-of-Thought (SoT), which first guides LLMs to generate the
-skeleton of the answer, and then conducts parallel API calls or batched
-decoding to complete the contents of each skeleton point in parallel. Not only
-does SoT provide considerable speed-ups across 12 LLMs, but it can also
-potentially improve the answer quality on several question categories. SoT is
-an initial attempt at data-centric optimization for inference efficiency, and
-showcases the potential of eliciting high-quality answers by explicitly
-planning the answer structure in language.
-                
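-The skeleton-of-thought template above wires this into LangChain; the core idea
-is two prompt stages with a parallel second stage. A minimal sketch, assuming
-`llm` is a thread-safe callable from prompt string to completion string:
-
-```python
-from concurrent.futures import ThreadPoolExecutor
-
-def skeleton_of_thought(question: str, llm) -> str:
-    """Stage 1: draft a short skeleton. Stage 2: expand each point in parallel."""
-    skeleton = llm(
-        "Write a numbered skeleton (3-5 points, a few words each) for answering:\n"
-        + question
-    )
-    points = [line for line in skeleton.splitlines() if line.strip()]
-    with ThreadPoolExecutor() as pool:
-        expansions = list(pool.map(
-            lambda point: llm(
-                f"Question: {question}\n"
-                f"Expand this skeleton point into one or two sentences: {point}"
-            ),
-            points,
-        ))
-    return "\n".join(expansions)
-```
-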
-## Llama 2: Open Foundation and Fine-Tuned Chat Models
-
-- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone,  et al.
-- **arXiv id:** [2307.09288v2](http://arxiv.org/abs/2307.09288v2)  **Published Date:** 2023-07-18
-- **LangChain:**
-
-   - **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
-
-**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and
-fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70
-billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for
-dialogue use cases. Our models outperform open-source chat models on most
-benchmarks we tested, and based on our human evaluations for helpfulness and
-safety, may be a suitable substitute for closed-source models. We provide a
-detailed description of our approach to fine-tuning and safety improvements of
-Llama 2-Chat in order to enable the community to build on our work and
-contribute to the responsible development of LLMs.
-                
-## Lost in the Middle: How Language Models Use Long Contexts
-
-- **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt,  et al.
-- **arXiv id:** [2307.03172v3](http://arxiv.org/abs/2307.03172v3)  **Published Date:** 2023-07-06
-- **LangChain:**
-
-   - **Documentation:** [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
-
-**Abstract:** While recent language models have the ability to take long contexts as input,
-relatively little is known about how well they use longer context. We analyze
-the performance of language models on two tasks that require identifying
-relevant information in their input contexts: multi-document question answering
-and key-value retrieval. We find that performance can degrade significantly
-when changing the position of relevant information, indicating that current
-language models do not robustly make use of information in long input contexts.
-In particular, we observe that performance is often highest when relevant
-information occurs at the beginning or end of the input context, and
-significantly degrades when models must access relevant information in the
-middle of long contexts, even for explicitly long-context models. Our analysis
-provides a better understanding of how language models use their input context
-and provides new evaluation protocols for future long-context language models.
-                
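-The long_context_reorder guide above exposes this finding as a document
-transformer that moves the highest-ranked documents to the start and end of the
-prompt, away from the weak middle. A small sketch, assuming `langchain-community`
-is installed and the documents arrive ordered by relevance:
-
-```python
-from langchain_core.documents import Document
-from langchain_community.document_transformers import LongContextReorder
-
-# Pretend these came back from a retriever, best match first.
-docs = [Document(page_content=f"Ranked result {i}") for i in range(1, 10)]
-
-reordered = LongContextReorder().transform_documents(docs)
-print([d.page_content for d in reordered])
-# The top-ranked documents now sit at the edges of the list, the weakest in the middle.
-```
-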
-## Query Rewriting for Retrieval-Augmented Large Language Models
-
-- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He,  et al.
-- **arXiv id:** [2305.14283v3](http://arxiv.org/abs/2305.14283v3)  **Published Date:** 2023-05-23
-- **LangChain:**
-
-   - **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
-   - **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
-
-**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the
-retrieve-then-read pipeline, making remarkable progress in knowledge-intensive
-tasks. This work introduces a new framework, Rewrite-Retrieve-Read, which
-replaces the previous retrieve-then-read pipeline for retrieval-augmented LLMs
-by approaching the problem from the perspective of query rewriting.
-either the retriever or the reader, our approach pays attention to the
-adaptation of the search query itself, for there is inevitably a gap between
-the input text and the needed knowledge in retrieval. We first prompt an LLM to
-generate the query, then use a web search engine to retrieve contexts.
-Furthermore, to better align the query to the frozen modules, we propose a
-trainable scheme for our pipeline. A small language model is adopted as a
-trainable rewriter to cater to the black-box LLM reader. The rewriter is
-trained using the feedback of the LLM reader by reinforcement learning.
-Evaluation is conducted on downstream tasks: open-domain QA and multiple-choice
-QA. Experimental results show consistent performance improvements, indicating
-that our framework is effective and scalable and offers a new paradigm
-for retrieval-augmented LLMs.
-                
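-The rewrite-retrieve-read template and rewrite cookbook above follow this
-recipe. A pared-down LCEL sketch of just the rewrite step (the model name and
-prompt wording are illustrative; the search and read steps are omitted):
-
-```python
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_openai import ChatOpenAI
-
-rewrite_prompt = ChatPromptTemplate.from_template(
-    "Provide a better web search query for answering the question below. "
-    "Return only the query.\n\nQuestion: {question}"
-)
-rewriter = rewrite_prompt | ChatOpenAI(model="gpt-4o-mini", temperature=0) | StrOutputParser()
-
-better_query = rewriter.invoke({"question": "what is langchain and who started it??"})
-# better_query would then go to a search engine, and the results to a reader prompt.
-print(better_query)
-```
-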
-## Large Language Model Guided Tree-of-Thought
-
-- **Authors:** Jieyi Long
-- **arXiv id:** [2305.08291v1](http://arxiv.org/abs/2305.08291v1)  **Published Date:** 2023-05-15
-- **LangChain:**
-
-   - **API Reference:** [langchain_experimental.tot](https://python.langchain.com/api_reference/experimental/tot.html)
-   - **Cookbook:** [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
-
-**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel
-approach aimed at improving the problem-solving capabilities of auto-regressive
-large language models (LLMs). The ToT technique is inspired by the human mind's
-approach for solving complex reasoning tasks through trial and error. In this
-process, the human mind explores the solution space through a tree-like thought
-process, allowing for backtracking when necessary. To implement ToT as a
-software system, we augment an LLM with additional modules including a prompter
-agent, a checker module, a memory module, and a ToT controller. In order to
-solve a given problem, these modules engage in a multi-round conversation with
-the LLM. The memory module records the conversation and state history of the
-problem solving process, which allows the system to backtrack to the previous
-steps of the thought-process and explore other directions from there. To verify
-the effectiveness of the proposed technique, we implemented a ToT-based solver
-for the Sudoku Puzzle. Experimental results show that the ToT framework can
-significantly increase the success rate of Sudoku puzzle solving. Our
-implementation of the ToT-based Sudoku solver is available on [GitHub](https://github.com/jieyilong/tree-of-thought-puzzle-solver).
-                
-## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
-
-- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan,  et al.
-- **arXiv id:** [2305.04091v3](http://arxiv.org/abs/2305.04091v3)  **Published Date:** 2023-05-06
-- **LangChain:**
-
-   - **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
-
-**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive
-performance in various NLP tasks. To tackle multi-step reasoning tasks,
-few-shot chain-of-thought (CoT) prompting includes a few manually crafted
-step-by-step reasoning demonstrations which enable LLMs to explicitly generate
-reasoning steps and improve their reasoning task accuracy. To eliminate the
-manual effort, Zero-shot-CoT concatenates the target problem statement with
-"Let's think step by step" as an input prompt to LLMs. Despite the success of
-Zero-shot-CoT, it still suffers from three pitfalls: calculation errors,
-missing-step errors, and semantic misunderstanding errors. To address the
-missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of
-two components: first, devising a plan to divide the entire task into smaller
-subtasks, and then carrying out the subtasks according to the plan. To address
-the calculation errors and improve the quality of generated reasoning steps, we
-extend PS prompting with more detailed instructions and derive PS+ prompting.
-We evaluate our proposed prompting strategy on ten datasets across three
-reasoning problems. The experimental results over GPT-3 show that our proposed
-zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets
-by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought
-Prompting, and has comparable performance with 8-shot CoT prompting on the math
-reasoning problem. The code can be found at
-https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.
-                
-## Zero-Shot Listwise Document Reranking with a Large Language Model
-
-- **Authors:** Xueguang Ma, Xinyu Zhang, Ronak Pradeep,  et al.
-- **arXiv id:** [2305.02156v1](http://arxiv.org/abs/2305.02156v1)  **Published Date:** 2023-05-03
-- **LangChain:**
-
-   - **Documentation:** [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression)
-   - **API Reference:** [langchain...LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#)
-
-**Abstract:** Supervised ranking methods based on bi-encoder or cross-encoder architectures
-have shown success in multi-stage text ranking tasks, but they require large
-amounts of relevance judgments as training data. In this work, we propose
-Listwise Reranker with a Large Language Model (LRL), which achieves strong
-reranking effectiveness without using any task-specific training data.
-Different from the existing pointwise ranking methods, where documents are
-scored independently and ranked according to the scores, LRL directly generates
-a reordered list of document identifiers given the candidate documents.
-Experiments on three TREC web search datasets demonstrate that LRL not only
-outperforms zero-shot pointwise methods when reranking first-stage retrieval
-results, but can also act as a final-stage reranker to improve the top-ranked
-results of a pointwise method for improved efficiency. Additionally, we apply
-our approach to subsets of MIRACL, a recent multilingual retrieval dataset,
-with results showing its potential to generalize across different languages.
-                
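-LangChain's LLMListwiseRerank (linked above) applies this idea as a document
-compressor. A sketch, assuming an OpenAI API key; the tiny in-memory store and
-model name are only stand-ins for a real index and reranking LLM:
-
-```python
-from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers.document_compressors import LLMListwiseRerank
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-
-store = InMemoryVectorStore.from_texts(
-    [
-        "LRL reranks the candidate list as a whole.",
-        "Cross-encoders need large amounts of relevance judgments.",
-        "Pointwise methods score each document independently.",
-    ],
-    OpenAIEmbeddings(),
-)
-
-reranker = LLMListwiseRerank.from_llm(
-    ChatOpenAI(model="gpt-4o-mini", temperature=0), top_n=2  # keep the 2 best documents
-)
-retriever = ContextualCompressionRetriever(
-    base_compressor=reranker, base_retriever=store.as_retriever(search_kwargs={"k": 3})
-)
-print(retriever.invoke("How does listwise reranking differ from pointwise scoring?"))
-```
-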
-## Visual Instruction Tuning
-
-- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu,  et al.
-- **arXiv id:** [2304.08485v2](http://arxiv.org/abs/2304.08485v2)  **Published Date:** 2023-04-17
-- **LangChain:**
-
-   - **Cookbook:** [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
-
-**Abstract:** Instruction tuning large language models (LLMs) using machine-generated
-instruction-following data has improved zero-shot capabilities on new tasks,
-but the idea is less explored in the multimodal field. In this paper, we
-present the first attempt to use language-only GPT-4 to generate multimodal
-language-image instruction-following data. By instruction tuning on such
-generated data, we introduce LLaVA: Large Language and Vision Assistant, an
-end-to-end trained large multimodal model that connects a vision encoder and
-LLM for general-purpose visual and language understanding. Our early experiments
-show that LLaVA demonstrates impressive multimodal chat abilities, sometimes
-exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and
-yields an 85.1% relative score compared with GPT-4 on a synthetic multimodal
-instruction-following dataset. When fine-tuned on Science QA, the synergy of
-LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make
-GPT-4 generated visual instruction tuning data, our model and code base
-publicly available.
-                
-## Generative Agents: Interactive Simulacra of Human Behavior
-
-- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai,  et al.
-- **arXiv id:** [2304.03442v2](http://arxiv.org/abs/2304.03442v2)  **Published Date:** 2023-04-07
-- **LangChain:**
-
-   - **Cookbook:** [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
-
-**Abstract:** Believable proxies of human behavior can empower interactive applications
-ranging from immersive environments to rehearsal spaces for interpersonal
-communication to prototyping tools. In this paper, we introduce generative
-agents--computational software agents that simulate believable human behavior.
-Generative agents wake up, cook breakfast, and head to work; artists paint,
-while authors write; they form opinions, notice each other, and initiate
-conversations; they remember and reflect on days past as they plan the next
-day. To enable generative agents, we describe an architecture that extends a
-large language model to store a complete record of the agent's experiences
-using natural language, synthesize those memories over time into higher-level
-reflections, and retrieve them dynamically to plan behavior. We instantiate
-generative agents to populate an interactive sandbox environment inspired by
-The Sims, where end users can interact with a small town of twenty five agents
-using natural language. In an evaluation, these generative agents produce
-believable individual and emergent social behaviors: for example, starting with
-only a single user-specified notion that one agent wants to throw a Valentine's
-Day party, the agents autonomously spread invitations to the party over the
-next two days, make new acquaintances, ask each other out on dates to the
-party, and coordinate to show up for the party together at the right time. We
-demonstrate through ablation that the components of our agent
-architecture--observation, planning, and reflection--each contribute critically
-to the believability of agent behavior. By fusing large language models with
-computational, interactive agents, this work introduces architectural and
-interaction patterns for enabling believable simulations of human behavior.
-                
-## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
-
-- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani,  et al.
-- **arXiv id:** [2303.17760v2](http://arxiv.org/abs/2303.17760v2)  **Published Date:** 2023-03-31
-- **LangChain:**
-
-   - **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
-
-**Abstract:** The rapid advancement of chat-based language models has led to remarkable
-progress in complex task-solving. However, their success heavily relies on
-human input to guide the conversation, which can be challenging and
-time-consuming. This paper explores the potential of building scalable
-techniques to facilitate autonomous cooperation among communicative agents, and
-provides insight into their "cognitive" processes. To address the challenges of
-achieving autonomous cooperation, we propose a novel communicative agent
-framework named role-playing. Our approach involves using inception prompting
-to guide chat agents toward task completion while maintaining consistency with
-human intentions. We showcase how role-playing can be used to generate
-conversational data for studying the behaviors and capabilities of a society of
-agents, providing a valuable resource for investigating conversational language
-models. In particular, we conduct comprehensive studies on
-instruction-following cooperation in multi-agent settings. Our contributions
-include introducing a novel communicative agent framework, offering a scalable
-approach for studying the cooperative behaviors and capabilities of multi-agent
-systems, and open-sourcing our library to support research on communicative
-agents and beyond: https://github.com/camel-ai/camel.
-                
-## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
-
-- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan,  et al.
-- **arXiv id:** [2303.17580v4](http://arxiv.org/abs/2303.17580v4)  **Published Date:** 2023-03-30
-- **LangChain:**
-
-   - **API Reference:** [langchain_experimental.autonomous_agents](https://python.langchain.com/api_reference/experimental/autonomous_agents.html)
-   - **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
-
-**Abstract:** Solving complicated AI tasks with different domains and modalities is a key
-step toward artificial general intelligence. While there are numerous AI models
-available for various domains and modalities, they cannot handle complicated AI
-tasks autonomously. Considering large language models (LLMs) have exhibited
-exceptional abilities in language understanding, generation, interaction, and
-reasoning, we advocate that LLMs could act as a controller to manage existing
-AI models to solve complicated AI tasks, with language serving as a generic
-interface to empower this. Based on this philosophy, we present HuggingGPT, an
-LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI
-models in machine learning communities (e.g., Hugging Face) to solve AI tasks.
-Specifically, we use ChatGPT to conduct task planning when receiving a user
-request, select models according to their function descriptions available in
-Hugging Face, execute each subtask with the selected AI model, and summarize
-the response according to the execution results. By leveraging the strong
-language capability of ChatGPT and abundant AI models in Hugging Face,
-HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different
-modalities and domains and achieve impressive results in language, vision,
-speech, and other challenging tasks, which paves a new way towards the
-realization of artificial general intelligence.
-                
-## A Watermark for Large Language Models
-
-- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen,  et al.
-- **arXiv id:** [2301.10226v4](http://arxiv.org/abs/2301.10226v4)  **Published Date:** 2023-01-24
-- **LangChain:**
-
-   - **API Reference:** [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
-**Abstract:** Potential harms of large language models can be mitigated by watermarking
-model output, i.e., embedding signals into generated text that are invisible to
-humans but algorithmically detectable from a short span of tokens. We propose a
-watermarking framework for proprietary language models. The watermark can be
-embedded with negligible impact on text quality, and can be detected using an
-efficient open-source algorithm without access to the language model API or
-parameters. The watermark works by selecting a randomized set of "green" tokens
-before a word is generated, and then softly promoting use of green tokens
-during sampling. We propose a statistical test for detecting the watermark with
-interpretable p-values, and derive an information-theoretic framework for
-analyzing the sensitivity of the watermark. We test the watermark using a
-multi-billion parameter model from the Open Pretrained Transformer (OPT)
-family, and discuss robustness and security.
-                
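-The watermarking scheme itself is compact enough to sketch. The toy function
-below illustrates the green-list logit bias described in the abstract; the
-constants and hashing are illustrative choices, not the authors' code:
-
-```python
-import numpy as np
-
-def watermark_logits(logits: np.ndarray, prev_token: int,
-                     gamma: float = 0.5, delta: float = 2.0, key: int = 42) -> np.ndarray:
-    """Softly promote a pseudo-random 'green' subset of the vocabulary.
-
-    The green list is seeded from the previous token, so a detector that knows
-    `key` can re-derive it and count green tokens without access to the model.
-    """
-    vocab_size = logits.shape[0]
-    rng = np.random.default_rng(key * 1_000_003 + prev_token)
-    green = rng.permutation(vocab_size)[: int(gamma * vocab_size)]
-    biased = logits.copy()
-    biased[green] += delta  # soft promotion keeps the impact on text quality small
-    return biased
-
-print(watermark_logits(np.zeros(10), prev_token=7))
-```
-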
-## Precise Zero-Shot Dense Retrieval without Relevance Labels
-
-- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin,  et al.
-- **arXiv id:** [2212.10496v1](http://arxiv.org/abs/2212.10496v1)  **Published Date:** 2022-12-20
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-   - **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
-   - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
-   - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
-
-**Abstract:** While dense retrieval has been shown effective and efficient across tasks and
-languages, it remains difficult to create effective fully zero-shot dense
-retrieval systems when no relevance label is available. In this paper, we
-recognize the difficulty of zero-shot learning and encoding relevance. Instead,
-we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a
-query, HyDE first zero-shot instructs an instruction-following language model
-(e.g. InstructGPT) to generate a hypothetical document. The document captures
-relevance patterns but is unreal and may contain false details. Then, an
-unsupervised contrastively learned encoder~(e.g. Contriever) encodes the
-document into an embedding vector. This vector identifies a neighborhood in the
-corpus embedding space, where similar real documents are retrieved based on
-vector similarity. This second step grounds the generated document to the actual
-corpus, with the encoder's dense bottleneck filtering out the incorrect
-details. Our experiments show that HyDE significantly outperforms the
-state-of-the-art unsupervised dense retriever Contriever and shows strong
-performance comparable to fine-tuned retrievers, across various tasks (e.g. web
-search, QA, fact verification) and languages~(e.g. sw, ko, ja).
-                
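-The hyde template and HypotheticalDocumentEmbedder above wrap this two-step
-procedure. A short sketch, assuming an OpenAI API key; "web_search" selects one
-of the built-in HyDE prompt templates:
-
-```python
-from langchain.chains import HypotheticalDocumentEmbedder
-from langchain_openai import OpenAI, OpenAIEmbeddings
-
-hyde_embedder = HypotheticalDocumentEmbedder.from_llm(
-    llm=OpenAI(temperature=0),
-    base_embeddings=OpenAIEmbeddings(),
-    prompt_key="web_search",
-)
-
-# The query is first expanded into a hypothetical answer document; only that
-# document is embedded, and its vector is what gets searched against the corpus.
-vector = hyde_embedder.embed_query("What are the side effects of the new malaria vaccine?")
-print(len(vector))
-```
-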
-## Constitutional AI: Harmlessness from AI Feedback
-
-- **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu,  et al.
-- **arXiv id:** [2212.08073v1](http://arxiv.org/abs/2212.08073v1)  **Published Date:** 2022-12-15
-- **LangChain:**
-
-   - **Documentation:** [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
-
-**Abstract:** As AI systems become more capable, we would like to enlist their help to
-supervise other AIs. We experiment with methods for training a harmless AI
-assistant through self-improvement, without any human labels identifying
-harmful outputs. The only human oversight is provided through a list of rules
-or principles, and so we refer to the method as 'Constitutional AI'. The
-process involves both a supervised learning and a reinforcement learning phase.
-In the supervised phase we sample from an initial model, then generate
-self-critiques and revisions, and then finetune the original model on revised
-responses. In the RL phase, we sample from the finetuned model, use a model to
-evaluate which of the two samples is better, and then train a preference model
-from this dataset of AI preferences. We then train with RL using the preference
-model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a
-result we are able to train a harmless but non-evasive AI assistant that
-engages with harmful queries by explaining its objections to them. Both the SL
-and RL methods can leverage chain-of-thought style reasoning to improve the
-human-judged performance and transparency of AI decision making. These methods
-make it possible to control AI behavior more precisely and with far fewer human
-labels.
-                
-## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
-
-- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande,  et al.
-- **arXiv id:** [2212.07425v3](http://arxiv.org/abs/2212.07425v3)  **Published Date:** 2022-12-12
-- **LangChain:**
-
-   - **API Reference:** [langchain_experimental.fallacy_removal](https://python.langchain.com/api_reference/experimental/fallacy_removal.html)
-
-**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been
-amplified in the Internet era. Given the volume of data and the subtlety of
-identifying violations of argumentation norms, supporting information analytics
-tasks, like content moderation, with trustworthy methods that can identify
-logical fallacies is essential. In this paper, we formalize prior theoretical
-work on logical fallacies into a comprehensive three-stage evaluation framework
-of detection, coarse-grained, and fine-grained classification. We adapt
-existing evaluation datasets for each stage of the evaluation. We employ three
-families of robust and explainable methods based on prototype reasoning,
-instance-based reasoning, and knowledge injection. The methods combine language
-models with background knowledge and explainable mechanisms. Moreover, we
-address data sparsity with strategies for data augmentation and curriculum
-learning. Our three-stage framework natively consolidates prior datasets and
-methods from existing tasks, like propaganda detection, serving as an
-overarching evaluation testbed. We extensively evaluate these methods on our
-datasets, focusing on their robustness and explainability. Our results provide
-insight into the strengths and weaknesses of the methods on different
-components and fallacy classes, indicating that fallacy identification is a
-challenging task that may require specialized forms of reasoning to capture
-various classes. We share our open-source code and data on GitHub to support
-further work on logical fallacy identification.
-                
-## Complementary Explanations for Effective In-Context Learning
-
-- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz,  et al.
-- **arXiv id:** [2211.13892v2](http://arxiv.org/abs/2211.13892v2)  **Published Date:** 2022-11-25
-- **LangChain:**
-
-   - **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
-
-**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
-learning from explanations in prompts, but there has been limited understanding
-of exactly how these explanations function or why they are effective. This work
-aims to better understand the mechanisms by which explanations are used for
-in-context learning. We first study the impact of two different factors on the
-performance of prompts with explanations: the computation trace (the way the
-solution is decomposed) and the natural language used to express the prompt. By
-perturbing explanations on three controlled tasks, we show that both factors
-contribute to the effectiveness of explanations. We further study how to form
-maximally effective sets of explanations for solving a given test query. We
-find that LLMs can benefit from the complementarity of the explanation set:
-diverse reasoning skills shown by different exemplars can lead to better
-performance. Therefore, we propose a maximal marginal relevance-based exemplar
-selection approach for constructing exemplar sets that are both relevant as
-well as complementary, which successfully improves the in-context learning
-performance across three real-world tasks on multiple LLMs.
-                
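-The MaxMarginalRelevanceExampleSelector linked above operationalizes this
-"relevant but complementary" selection for few-shot prompts. A sketch, assuming
-an OpenAI API key and `faiss-cpu` installed:
-
-```python
-from langchain_community.vectorstores import FAISS
-from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
-from langchain_openai import OpenAIEmbeddings
-
-examples = [
-    {"input": "happy", "output": "sad"},
-    {"input": "tall", "output": "short"},
-    {"input": "sunny", "output": "gloomy"},
-    {"input": "energetic", "output": "lethargic"},
-]
-
-# MMR picks exemplars close to the query while penalizing redundancy among them.
-selector = MaxMarginalRelevanceExampleSelector.from_examples(
-    examples, OpenAIEmbeddings(), FAISS, k=2
-)
-print(selector.select_examples({"input": "cheerful"}))
-```
-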
-## PAL: Program-aided Language Models
-
-- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou,  et al.
-- **arXiv id:** [2211.10435v2](http://arxiv.org/abs/2211.10435v2)  **Published Date:** 2022-11-18
-- **LangChain:**
-
-   - **API Reference:** [langchain_experimental.pal_chain](https://python.langchain.com/api_reference/experimental/pal_chain.html), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
-   - **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
-
-**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
-to perform arithmetic and symbolic reasoning tasks, when provided with a few
-examples at test time ("few-shot prompting"). Much of this success can be
-attributed to prompting methods such as "chain-of-thought'', which employ LLMs
-for both understanding the problem description by decomposing it into steps, as
-well as solving each step of the problem. While LLMs seem to be adept at this
-sort of step-by-step decomposition, LLMs often make logical and arithmetic
-mistakes in the solution part, even when the problem is decomposed correctly.
-In this paper, we present Program-Aided Language models (PAL): a novel approach
-that uses the LLM to read natural language problems and generate programs as
-the intermediate reasoning steps, but offloads the solution step to a runtime
-such as a Python interpreter. With PAL, decomposing the natural language
-problem into runnable steps remains the only learning task for the LLM, while
-solving is delegated to the interpreter. We demonstrate this synergy between a
-neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and
-algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all
-these natural language reasoning tasks, generating code using an LLM and
-reasoning using a Python interpreter leads to more accurate results than much
-larger models. For example, PAL using Codex achieves state-of-the-art few-shot
-accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
-which uses chain-of-thought by absolute 15% top-1. Our code and data are
-publicly available at http://reasonwithpal.com/ .
-                
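-The PALChain in langchain_experimental (linked above) follows this
-generate-then-execute recipe. A sketch, assuming an OpenAI API key; note that
-recent releases gate execution of model-generated code behind an explicit
-opt-in flag, so check the API reference before running it:
-
-```python
-from langchain_experimental.pal_chain import PALChain
-from langchain_openai import OpenAI
-
-llm = OpenAI(temperature=0)
-pal_chain = PALChain.from_math_prompt(llm=llm, verbose=True)
-
-question = (
-    "Jan has three times the number of pets as Marcia. Marcia has two more pets "
-    "than Cindy. If Cindy has four pets, how many pets do the three have in total?"
-)
-# The LLM writes a small Python program for the word problem; the chain runs it
-# with a Python interpreter and returns the computed answer.
-print(pal_chain.invoke({"question": question}))
-```
-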
-## An Analysis of Fusion Functions for Hybrid Retrieval
-
-- **Authors:** Sebastian Bruch, Siyu Gai, Amir Ingber
-- **arXiv id:** [2210.11934v2](http://arxiv.org/abs/2210.11934v2)  **Published Date:** 2022-10-21
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** We study hybrid search in text retrieval where lexical and semantic search
-are fused together with the intuition that the two are complementary in how
-they model relevance. In particular, we examine fusion by a convex combination
-(CC) of lexical and semantic scores, as well as the Reciprocal Rank Fusion
-(RRF) method, and identify their advantages and potential pitfalls. Contrary to
-existing studies, we find RRF to be sensitive to its parameters; that the
-learning of a CC fusion is generally agnostic to the choice of score
-normalization; that CC outperforms RRF in in-domain and out-of-domain settings;
-and finally, that CC is sample efficient, requiring only a small set of
-training examples to tune its only parameter to a target domain.
-                
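-Both fusion functions studied here fit in a few lines, so a self-contained
-sketch makes the comparison concrete (document ids and scores below are made
-up; CC assumes the two score sets were already normalized to a shared range):
-
-```python
-def reciprocal_rank_fusion(rankings: dict[str, list[str]], k: int = 60) -> list[str]:
-    """RRF: sum 1 / (k + rank) of each document over every system's ranked list."""
-    scores: dict[str, float] = {}
-    for ranked_ids in rankings.values():
-        for rank, doc_id in enumerate(ranked_ids, start=1):
-            scores[doc_id] = scores.get(doc_id, 0.0) + 1.0 / (k + rank)
-    return sorted(scores, key=scores.get, reverse=True)
-
-def convex_combination(lexical: dict[str, float], semantic: dict[str, float],
-                       alpha: float = 0.3) -> list[str]:
-    """CC: alpha * lexical + (1 - alpha) * semantic, on normalized scores."""
-    doc_ids = set(lexical) | set(semantic)
-    fused = {d: alpha * lexical.get(d, 0.0) + (1 - alpha) * semantic.get(d, 0.0)
-             for d in doc_ids}
-    return sorted(fused, key=fused.get, reverse=True)
-
-print(reciprocal_rank_fusion({"bm25": ["d1", "d2", "d3"], "dense": ["d3", "d1", "d4"]}))
-print(convex_combination({"d1": 1.0, "d2": 0.4}, {"d1": 0.2, "d3": 0.9}))
-```
-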
-## ReAct: Synergizing Reasoning and Acting in Language Models
-
-- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu,  et al.
-- **arXiv id:** [2210.03629v3](http://arxiv.org/abs/2210.03629v3)  **Published Date:** 2022-10-06
-- **LangChain:**
-
-   - **Documentation:** [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts)
-   - **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
-
-**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
-across tasks in language understanding and interactive decision making, their
-abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g.
-action plan generation) have primarily been studied as separate topics. In this
-paper, we explore the use of LLMs to generate both reasoning traces and
-task-specific actions in an interleaved manner, allowing for greater synergy
-between the two: reasoning traces help the model induce, track, and update
-action plans as well as handle exceptions, while actions allow it to interface
-with external sources, such as knowledge bases or environments, to gather
-additional information. We apply our approach, named ReAct, to a diverse set of
-language and decision making tasks and demonstrate its effectiveness over
-state-of-the-art baselines, as well as improved human interpretability and
-trustworthiness over methods without reasoning or acting components.
-Concretely, on question answering (HotpotQA) and fact verification (Fever),
-ReAct overcomes issues of hallucination and error propagation prevalent in
-chain-of-thought reasoning by interacting with a simple Wikipedia API, and
-generates human-like task-solving trajectories that are more interpretable than
-baselines without reasoning traces. On two interactive decision making
-benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and
-reinforcement learning methods by an absolute success rate of 34% and 10%
-respectively, while being prompted with only one or two in-context examples.
-Project site with code: https://react-lm.github.io
-                
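-The create_react_agent constructor listed above builds exactly this interleaved
-Thought/Action/Observation loop. A sketch, assuming an OpenAI API key and
-network access to pull the reference ReAct prompt from the LangChain Hub (the
-model name and toy tool are illustrative):
-
-```python
-from langchain import hub
-from langchain.agents import AgentExecutor, create_react_agent
-from langchain_core.tools import tool
-from langchain_openai import ChatOpenAI
-
-@tool
-def word_length(word: str) -> int:
-    """Return the number of characters in a word."""
-    return len(word)
-
-prompt = hub.pull("hwchase17/react")  # standard Thought/Action/Observation template
-llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
-agent = create_react_agent(llm, [word_length], prompt)
-executor = AgentExecutor(agent=agent, tools=[word_length], verbose=True)
-print(executor.invoke({"input": "How many letters are in the word 'interleaved'?"}))
-```
-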
-## Deep Lake: a Lakehouse for Deep Learning
-
-- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan,  et al.
-- **arXiv id:** [2209.10785v2](http://arxiv.org/abs/2209.10785v2)  **Published Date:** 2022-09-22
-- **LangChain:**
-
-   - **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
-
-**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
-workloads by enabling time travel, running SQL queries, ingesting data with
-ACID transactions, and visualizing petabyte-scale datasets on cloud storage.
-They allow organizations to break down data silos, unlock data-driven
-decision-making, improve operational efficiency, and reduce costs. However, as
-deep learning usage increases, traditional data lakes are not well-designed for
-applications such as natural language processing (NLP), audio processing,
-computer vision, and applications involving non-tabular datasets. This paper
-presents Deep Lake, an open-source lakehouse for deep learning applications
-developed at Activeloop. Deep Lake maintains the benefits of a vanilla data
-lake with one key difference: it stores complex data, such as images, videos,
-annotations, as well as tabular data, in the form of tensors and rapidly
-streams the data over the network to (a) Tensor Query Language, (b) in-browser
-visualization engine, or (c) deep learning frameworks without sacrificing GPU
-utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
-TensorFlow, JAX, and integrate with numerous MLOps tools.
-                
-## Matryoshka Representation Learning
-
-- **Authors:** Aditya Kusupati, Gantavya Bhatt, Aniket Rege,  et al.
-- **arXiv id:** [2205.13147v4](http://arxiv.org/abs/2205.13147v4)  **Published Date:** 2022-05-26
-- **LangChain:**
-
-   - **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
-
-**Abstract:** Learned representations are a central component in modern ML systems, serving
-a multitude of downstream tasks. When training such representations, it is
-often the case that computational and statistical constraints for each
-downstream task are unknown. In this context rigid, fixed capacity
-representations can be either over or under-accommodating to the task at hand.
-This leads us to ask: can we design a flexible representation that can adapt to
-multiple downstream tasks with varying computational resources? Our main
-contribution is Matryoshka Representation Learning (MRL) which encodes
-information at different granularities and allows a single embedding to adapt
-to the computational constraints of downstream tasks. MRL minimally modifies
-existing representation learning pipelines and imposes no additional cost
-during inference and deployment. MRL learns coarse-to-fine representations that
-are at least as accurate and rich as independently trained low-dimensional
-representations. The flexibility within the learned Matryoshka Representations
-offers: (a) up to 14x smaller embedding size for ImageNet-1K classification at
-the same level of accuracy; (b) up to 14x real-world speed-ups for large-scale
-retrieval on ImageNet-1K and 4K; and (c) up to 2% accuracy improvements for
-long-tail few-shot classification, all while being as robust as the original
-representations. Finally, we show that MRL extends seamlessly to web-scale
-datasets (ImageNet, JFT) across various modalities -- vision (ViT, ResNet),
-vision + language (ALIGN) and language (BERT). MRL code and pretrained models
-are open-sourced at https://github.com/RAIVNLab/MRL.
-                
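-The Snowflake integration above exposes embeddings trained this way; consuming
-them amounts to truncating to a prefix and renormalizing. A tiny illustrative
-sketch (the 768-dimensional vector is random stand-in data):
-
-```python
-import numpy as np
-
-def truncate_embedding(vec: np.ndarray, dim: int) -> np.ndarray:
-    """MRL-style adaptation: keep the first `dim` coordinates, then renormalize."""
-    short = np.asarray(vec, dtype=np.float32)[:dim]
-    return short / (np.linalg.norm(short) + 1e-12)
-
-full = np.random.default_rng(0).normal(size=768)
-for dim in (64, 256, 768):
-    print(dim, truncate_embedding(full, dim).shape)
-```
-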
-## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
-
-- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
-- **arXiv id:** [2205.12654v1](http://arxiv.org/abs/2205.12654v1)  **Published Date:** 2022-05-25
-- **LangChain:**
-
-   - **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
-
-**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
-languages is challenging, in particular to cover the long tail of low-resource
-languages. A promising approach has been to train one-for-all multilingual
-models capable of cross-lingual transfer, but these models often suffer from
-insufficient capacity and interference between unrelated languages. Instead, we
-move away from this approach and focus on training multiple language (family)
-specific representations, but most prominently enable all languages to still be
-encoded in the same representational space. To achieve this, we focus on
-teacher-student training, allowing all encoders to be mutually compatible for
-bitext mining, and enabling fast learning of new languages. We introduce a new
-teacher-student training scheme which combines supervised and self-supervised
-training, allowing encoders to take advantage of monolingual training data,
-which is valuable in the low-resource setting.
-  Our approach significantly outperforms the original LASER encoder. We study
-very low-resource languages and handle 50 African languages, many of which are
-not covered by any other model. For these languages, we train sentence
-encoders, mine bitexts, and validate the bitexts by training NMT systems.
-                
-## Evaluating the Text-to-SQL Capabilities of Large Language Models
-
-- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
-- **arXiv id:** [2204.00498v1](http://arxiv.org/abs/2204.00498v1)  **Published Date:** 2022-03-15
-- **LangChain:**
-
-   - **Documentation:** [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa)
-   - **API Reference:** [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
-
-**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
-language model. We find that, without any finetuning, Codex is a strong
-baseline on the Spider benchmark; we also analyze the failure modes of Codex in
-this setting. Furthermore, we demonstrate on the GeoQuery and Scholar
-benchmarks that a small number of in-domain examples provided in the prompt
-enables Codex to perform better than state-of-the-art models finetuned on such
-few-shot examples.
-                
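-The sql_qa tutorial and SQLDatabase utility above apply this few-shot finding
-in LangChain. A sketch, assuming an OpenAI API key and the sample Chinook
-SQLite database on disk (both the model name and database are illustrative):
-
-```python
-from langchain.chains import create_sql_query_chain
-from langchain_community.utilities import SQLDatabase
-from langchain_openai import ChatOpenAI
-
-db = SQLDatabase.from_uri("sqlite:///Chinook.db")
-llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
-write_query = create_sql_query_chain(llm, db)  # prompt includes schema and sample rows
-sql = write_query.invoke({"question": "How many employees are there?"})
-print(sql)
-print(db.run(sql))  # execute the generated SQL against the database
-```
-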
-## Locally Typical Sampling
-
-- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher,  et al.
-- **arXiv id:** [2202.00666v5](http://arxiv.org/abs/2202.00666v5)  **Published Date:** 2022-02-01
-- **LangChain:**
-
-   - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
-**Abstract:** Today's probabilistic language generators fall short when it comes to
-producing coherent and fluent text despite the fact that the underlying models
-perform well under standard metrics, e.g., perplexity. This discrepancy has
-puzzled the language generation community for the last few years. In this work,
-we posit that the abstraction of natural language generation as a discrete
-stochastic process--which allows for an information-theoretic analysis--can
-provide new insights into the behavior of probabilistic language generators,
-e.g., why high-probability texts can be dull or repetitive. Humans use language
-as a means of communicating information, aiming to do so in a simultaneously
-efficient and error-minimizing manner; in fact, psycholinguistics research
-suggests humans choose each word in a string with this subconscious goal in
-mind. We formally define the set of strings that meet this criterion: those for
-which each word has an information content close to the expected information
-content, i.e., the conditional entropy of our model. We then propose a simple
-and efficient procedure for enforcing this criterion when generating from
-probabilistic models, which we call locally typical sampling. Automatic and
-human evaluations show that, in comparison to nucleus and top-k sampling,
-locally typical sampling offers competitive performance (in both abstractive
-summarization and story generation) in terms of quality while consistently
-reducing degenerate repetitions.
-                
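-Inference backends such as the HuggingFace endpoints listed above typically
-expose this as a `typical_p` parameter; the selection rule itself is short. A
-NumPy sketch of the filtering step (a simplification of the paper's procedure):
-
-```python
-import numpy as np
-
-def locally_typical_filter(logits: np.ndarray, tau: float = 0.95) -> np.ndarray:
-    """Keep the smallest set of tokens whose surprisal is closest to the entropy
-    and whose probability mass reaches tau, then renormalize."""
-    probs = np.exp(logits - logits.max())
-    probs /= probs.sum()
-    surprisal = -np.log(probs + 1e-12)
-    entropy = float((probs * surprisal).sum())
-
-    order = np.argsort(np.abs(surprisal - entropy))  # most "typical" tokens first
-    cutoff = int(np.searchsorted(np.cumsum(probs[order]), tau)) + 1
-    kept = order[:cutoff]
-
-    filtered = np.zeros_like(probs)
-    filtered[kept] = probs[kept]
-    return filtered / filtered.sum()
-
-print(locally_typical_filter(np.array([2.0, 1.0, 0.5, -1.0, -3.0])))
-```
-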
-## ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction
-
-- **Authors:** Keshav Santhanam, Omar Khattab, Jon Saad-Falcon,  et al.
-- **arXiv id:** [2112.01488v3](http://arxiv.org/abs/2112.01488v3)  **Published Date:** 2021-12-02
-- **LangChain:**
-
-   - **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
-
-**Abstract:** Neural information retrieval (IR) has greatly advanced search and other
-knowledge-intensive language tasks. While many neural IR methods encode queries
-and documents into single-vector representations, late interaction models
-produce multi-vector representations at the granularity of each token and
-decompose relevance modeling into scalable token-level computations. This
-decomposition has been shown to make late interaction more effective, but it
-inflates the space footprint of these models by an order of magnitude. In this
-work, we introduce ColBERTv2, a retriever that couples an aggressive residual
-compression mechanism with a denoised supervision strategy to simultaneously
-improve the quality and space footprint of late interaction. We evaluate
-ColBERTv2 across a wide range of benchmarks, establishing state-of-the-art
-quality within and outside the training domain while reducing the space
-footprint of late interaction models by 6--10$\times$.
-                
-## Learning Transferable Visual Models From Natural Language Supervision
-
-- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy,  et al.
-- **arXiv id:** [2103.00020v1](http://arxiv.org/abs/2103.00020v1)  **Published Date:** 2021-02-26
-- **LangChain:**
-
-   - **API Reference:** [langchain_experimental.open_clip](https://python.langchain.com/api_reference/experimental/open_clip.html)
-
-**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
-of predetermined object categories. This restricted form of supervision limits
-their generality and usability since additional labeled data is needed to
-specify any other visual concept. Learning directly from raw text about images
-is a promising alternative which leverages a much broader source of
-supervision. We demonstrate that the simple pre-training task of predicting
-which caption goes with which image is an efficient and scalable way to learn
-SOTA image representations from scratch on a dataset of 400 million (image,
-text) pairs collected from the internet. After pre-training, natural language
-is used to reference learned visual concepts (or describe new ones) enabling
-zero-shot transfer of the model to downstream tasks. We study the performance
-of this approach by benchmarking on over 30 different existing computer vision
-datasets, spanning tasks such as OCR, action recognition in videos,
-geo-localization, and many types of fine-grained object classification. The
-model transfers non-trivially to most tasks and is often competitive with a
-fully supervised baseline without the need for any dataset specific training.
-For instance, we match the accuracy of the original ResNet-50 on ImageNet
-zero-shot without needing to use any of the 1.28 million training examples it
-was trained on. We release our code and pre-trained model weights at
-https://github.com/OpenAI/CLIP.
-                
-## Language Models are Few-Shot Learners
-
-- **Authors:** Tom B. Brown, Benjamin Mann, Nick Ryder,  et al.
-- **arXiv id:** [2005.14165v4](http://arxiv.org/abs/2005.14165v4)  **Published Date:** 2020-05-28
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** Recent work has demonstrated substantial gains on many NLP tasks and
-benchmarks by pre-training on a large corpus of text followed by fine-tuning on
-a specific task. While typically task-agnostic in architecture, this method
-still requires task-specific fine-tuning datasets of thousands or tens of
-thousands of examples. By contrast, humans can generally perform a new language
-task from only a few examples or from simple instructions - something which
-current NLP systems still largely struggle to do. Here we show that scaling up
-language models greatly improves task-agnostic, few-shot performance, sometimes
-even reaching competitiveness with prior state-of-the-art fine-tuning
-approaches. Specifically, we train GPT-3, an autoregressive language model with
-175 billion parameters, 10x more than any previous non-sparse language model,
-and test its performance in the few-shot setting. For all tasks, GPT-3 is
-applied without any gradient updates or fine-tuning, with tasks and few-shot
-demonstrations specified purely via text interaction with the model. GPT-3
-achieves strong performance on many NLP datasets, including translation,
-question-answering, and cloze tasks, as well as several tasks that require
-on-the-fly reasoning or domain adaptation, such as unscrambling words, using a
-novel word in a sentence, or performing 3-digit arithmetic. At the same time,
-we also identify some datasets where GPT-3's few-shot learning still struggles,
-as well as some datasets where GPT-3 faces methodological issues related to
-training on large web corpora. Finally, we find that GPT-3 can generate samples
-of news articles which human evaluators have difficulty distinguishing from
-articles written by humans. We discuss broader societal impacts of this finding
-and of GPT-3 in general.
-                
-## Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks
-
-- **Authors:** Patrick Lewis, Ethan Perez, Aleksandra Piktus,  et al.
-- **arXiv id:** [2005.11401v4](http://arxiv.org/abs/2005.11401v4)  **Published Date:** 2020-05-22
-- **LangChain:**
-
-   - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
-**Abstract:** Large pre-trained language models have been shown to store factual knowledge
-in their parameters, and achieve state-of-the-art results when fine-tuned on
-downstream NLP tasks. However, their ability to access and precisely manipulate
-knowledge is still limited, and hence on knowledge-intensive tasks, their
-performance lags behind task-specific architectures. Additionally, providing
-provenance for their decisions and updating their world knowledge remain open
-research problems. Pre-trained models with a differentiable access mechanism to
-explicit non-parametric memory can overcome this issue, but have so far been
-only investigated for extractive downstream tasks. We explore a general-purpose
-fine-tuning recipe for retrieval-augmented generation (RAG) -- models which
-combine pre-trained parametric and non-parametric memory for language
-generation. We introduce RAG models where the parametric memory is a
-pre-trained seq2seq model and the non-parametric memory is a dense vector index
-of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG
-formulations, one which conditions on the same retrieved passages across the
-whole generated sequence, the other can use different passages per token. We
-fine-tune and evaluate our models on a wide range of knowledge-intensive NLP
-tasks and set the state-of-the-art on three open domain QA tasks, outperforming
-parametric seq2seq models and task-specific retrieve-and-extract architectures.
-For language generation tasks, we find that RAG models generate more specific,
-diverse and factual language than a state-of-the-art parametric-only seq2seq
-baseline.
-                
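-*Illustration (not from the paper):* a minimal sketch of the retrieve-then-generate pattern described above, assembled from generic LangChain components. The vector store, embedding model, and chat model below are assumptions chosen for demonstration, not the paper's Wikipedia index or seq2seq generator:
-
-```python
-from langchain_community.vectorstores import FAISS
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-
-# Non-parametric memory: a tiny dense vector index over a few passages.
-index = FAISS.from_texts(
-    [
-        "RAG pairs a neural retriever with a seq2seq generator.",
-        "The retriever returns passages; the generator conditions on them.",
-    ],
-    embedding=OpenAIEmbeddings(),
-)
-retriever = index.as_retriever(search_kwargs={"k": 2})
-
-# Parametric memory: a pretrained language model conditioned on the retrieved passages.
-prompt = ChatPromptTemplate.from_template(
-    "Answer using only this context:\n{context}\n\nQuestion: {question}"
-)
-chain = prompt | ChatOpenAI(model="gpt-4o-mini")
-
-question = "What does the generator condition on?"
-docs = retriever.invoke(question)
-answer = chain.invoke(
-    {"context": "\n".join(d.page_content for d in docs), "question": question}
-)
-print(answer.content)
-```
-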
-## CTRL: A Conditional Transformer Language Model for Controllable Generation
-
-- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney,  et al.
-- **arXiv id:** [1909.05858v2](http://arxiv.org/abs/1909.05858v2)  **Published Date:** 2019-09-11
-- **LangChain:**
-
-   - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
-**Abstract:** Large-scale language models show promising text generation capabilities, but
-users cannot easily control particular aspects of the generated text. We
-release CTRL, a 1.63 billion-parameter conditional transformer language model,
-trained to condition on control codes that govern style, content, and
-task-specific behavior. Control codes were derived from structure that
-naturally co-occurs with raw text, preserving the advantages of unsupervised
-learning while providing more explicit control over text generation. These
-codes also allow CTRL to predict which parts of the training data are most
-likely given a sequence. This provides a potential method for analyzing large
-amounts of data via model-based source attribution. We have released multiple
-full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
-                
\ No newline at end of file
diff --git a/langchain_md_files/additional_resources/dependents.mdx b/langchain_md_files/additional_resources/dependents.mdx
deleted file mode 100644
index a09df5027ecdca05cfe4e1f372602cc7341d362d..0000000000000000000000000000000000000000
--- a/langchain_md_files/additional_resources/dependents.mdx
+++ /dev/null
@@ -1,554 +0,0 @@
-# Dependents
-
-Dependents stats for `langchain-ai/langchain`
-
-[![](https://img.shields.io/static/v1?label=Used%20by&message=41717&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
-[![](https://img.shields.io/static/v1?label=Used%20by%20(public)&message=538&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
-[![](https://img.shields.io/static/v1?label=Used%20by%20(private)&message=41179&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
-
-
-[update: `2023-12-08`; only dependent repositories with Stars > 100]
-
-
-| Repository | Stars  |
-| :--------  | -----: |
-|[AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 46514 |
-|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 44439 |
-|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 35906 |
-|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 35528 |
-|[moymix/TaskMatrix](https://github.com/moymix/TaskMatrix) | 34342 |
-|[geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 31126 |
-|[streamlit/streamlit](https://github.com/streamlit/streamlit) | 28911 |
-|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 27833 |
-|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 26032 |
-|[OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 24946 |
-|[run-llama/llama_index](https://github.com/run-llama/llama_index) | 24859 |
-|[jmorganca/ollama](https://github.com/jmorganca/ollama) | 20849 |
-|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 20249 |
-|[chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 19305 |
-|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 19172 |
-|[PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 17528 |
-|[cube-js/cube](https://github.com/cube-js/cube) | 16575 |
-|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 16000 |
-|[mudler/LocalAI](https://github.com/mudler/LocalAI) | 14067 |
-|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 13679 |
-|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 13648 |
-|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 13423 |
-|[openai/evals](https://github.com/openai/evals) | 12649 |
-|[airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 12460 |
-|[langgenius/dify](https://github.com/langgenius/dify) | 11859 |
-|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10672 |
-|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9437 |
-|[langchain-ai/langchainjs](https://github.com/langchain-ai/langchainjs) | 9227 |
-|[gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 9203 |
-|[aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples) | 9079 |
-|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 8945 |
-|[PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 7550 |
-|[bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 6957 |
-|[THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6801 |
-|[microsoft/promptflow](https://github.com/microsoft/promptflow) | 6776 |
-|[cpacker/MemGPT](https://github.com/cpacker/MemGPT) | 6642 |
-|[joshpxyne/gpt-migrate](https://github.com/joshpxyne/gpt-migrate) | 6482 |
-|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 6037 |
-|[embedchain/embedchain](https://github.com/embedchain/embedchain) | 6023 |
-|[mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 6019 |
-|[assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 5936 |
-|[sweepai/sweep](https://github.com/sweepai/sweep) | 5855 |
-|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5766 |
-|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 5710 |
-|[pdm-project/pdm](https://github.com/pdm-project/pdm) | 5665 |
-|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 5568 |
-|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 5507 |
-|[Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 5501 |
-|[facebookresearch/llama-recipes](https://github.com/facebookresearch/llama-recipes) | 5477 |
-|[serge-chat/serge](https://github.com/serge-chat/serge) | 5221 |
-|[run-llama/rags](https://github.com/run-llama/rags) | 4916 |
-|[openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4870 |
-|[danswer-ai/danswer](https://github.com/danswer-ai/danswer) | 4774 |
-|[langchain-ai/opengpts](https://github.com/langchain-ai/opengpts) | 4709 |
-|[postgresml/postgresml](https://github.com/postgresml/postgresml) | 4639 |
-|[MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 4582 |
-|[intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4581 |
-|[yihong0618/xiaogpt](https://github.com/yihong0618/xiaogpt) | 4359 |
-|[RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 4357 |
-|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 4317 |
-|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4289 |
-|[apache/nifi](https://github.com/apache/nifi) | 4098 |
-|[langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 4091 |
-|[aiwaves-cn/agents](https://github.com/aiwaves-cn/agents) | 4073 |
-|[krishnaik06/The-Grand-Complete-Data-Science-Materials](https://github.com/krishnaik06/The-Grand-Complete-Data-Science-Materials) | 4065 |
-|[khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 4016 |
-|[Azure/azure-sdk-for-python](https://github.com/Azure/azure-sdk-for-python) | 3941 |
-|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3915 |
-|[OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 3799 |
-|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3771 |
-|[kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3688 |
-|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 3543 |
-|[llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3515 |
-|[shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 3425 |
-|[openchatai/OpenCopilot](https://github.com/openchatai/OpenCopilot) | 3418 |
-|[josStorer/RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | 3297 |
-|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 3280 |
-|[homanp/superagent](https://github.com/homanp/superagent) | 3258 |
-|[ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 3199 |
-|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 3099 |
-|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 3090 |
-|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2989 |
-|[xlang-ai/OpenAgents](https://github.com/xlang-ai/OpenAgents) | 2825 |
-|[dataelement/bisheng](https://github.com/dataelement/bisheng) | 2797 |
-|[Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2784 |
-|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2734 |
-|[run-llama/llama-hub](https://github.com/run-llama/llama-hub) | 2721 |
-|[SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2647 |
-|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 2637 |
-|[X-D-Lab/LangChain-ChatGLM-Webui](https://github.com/X-D-Lab/LangChain-ChatGLM-Webui) | 2532 |
-|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2517 |
-|[keephq/keep](https://github.com/keephq/keep) | 2448 |
-|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2397 |
-|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2324 |
-|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2241 |
-|[YiVal/YiVal](https://github.com/YiVal/YiVal) | 2232 |
-|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 2189 |
-|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 2136 |
-|[microsoft/TaskWeaver](https://github.com/microsoft/TaskWeaver) | 2126 |
-|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 2083 |
-|[FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 2053 |
-|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1999 |
-|[hegelai/prompttools](https://github.com/hegelai/prompttools) | 1984 |
-|[mckinsey/vizro](https://github.com/mckinsey/vizro) | 1951 |
-|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1868 |
-|[dot-agent/openAMS](https://github.com/dot-agent/openAMS) | 1796 |
-|[explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 1766 |
-|[AI-Citizen/SolidGPT](https://github.com/AI-Citizen/SolidGPT) | 1761 |
-|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1696 |
-|[run-llama/sec-insights](https://github.com/run-llama/sec-insights) | 1654 |
-|[avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1635 |
-|[microsoft/WhatTheHack](https://github.com/microsoft/WhatTheHack) | 1629 |
-|[noahshinn/reflexion](https://github.com/noahshinn/reflexion) | 1625 |
-|[psychic-api/psychic](https://github.com/psychic-api/psychic) | 1618 |
-|[Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1611 |
-|[pinterest/querybook](https://github.com/pinterest/querybook) | 1586 |
-|[refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1553 |
-|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1537 |
-|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1522 |
-|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1493 |
-|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1484 |
-|[greshake/llm-security](https://github.com/greshake/llm-security) | 1483 |
-|[promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 1480 |
-|[milvus-io/bootcamp](https://github.com/milvus-io/bootcamp) | 1477 |
-|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1475 |
-|[melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 1428 |
-|[YORG-AI/Open-Assistant](https://github.com/YORG-AI/Open-Assistant) | 1419 |
-|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1416 |
-|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1408 |
-|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 1398 |
-|[intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 1387 |
-|[Azure/azureml-examples](https://github.com/Azure/azureml-examples) | 1385 |
-|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1367 |
-|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 1355 |
-|[xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 1325 |
-|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 1323 |
-|[SuperDuperDB/superduperdb](https://github.com/SuperDuperDB/superduperdb) | 1290 |
-|[cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 1284 |
-|[psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 1260 |
-|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 1250 |
-|[nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 1237 |
-|[pluralsh/plural](https://github.com/pluralsh/plural) | 1234 |
-|[cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 1194 |
-|[LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 1184 |
-|[poe-platform/server-bot-quick-start](https://github.com/poe-platform/server-bot-quick-start) | 1182 |
-|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1180 |
-|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1171 |
-|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1156 |
-|[alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 1153 |
-|[ThousandBirdsInc/chidori](https://github.com/ThousandBirdsInc/chidori) | 1152 |
-|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1137 |
-|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 1083 |
-|[ray-project/llm-applications](https://github.com/ray-project/llm-applications) | 1080 |
-|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 1072 |
-|[jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 1041 |
-|[MetaGLM/FinGLM](https://github.com/MetaGLM/FinGLM) | 1035 |
-|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 1020 |
-|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 991 |
-|[langchain-ai/langserve](https://github.com/langchain-ai/langserve) | 983 |
-|[THUDM/AgentTuning](https://github.com/THUDM/AgentTuning) | 976 |
-|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 975 |
-|[codeacme17/examor](https://github.com/codeacme17/examor) | 964 |
-|[all-in-aigc/gpts-works](https://github.com/all-in-aigc/gpts-works) | 946 |
-|[Ikaros-521/AI-Vtuber](https://github.com/Ikaros-521/AI-Vtuber) | 946 |
-|[microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 898 |
-|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 895 |
-|[ricklamers/shell-ai](https://github.com/ricklamers/shell-ai) | 893 |
-|[modelscope/modelscope-agent](https://github.com/modelscope/modelscope-agent) | 893 |
-|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 886 |
-|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 880 |
-|[kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 872 |
-|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 846 |
-|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 841 |
-|[kreneskyp/ix](https://github.com/kreneskyp/ix) | 821 |
-|[Link-AGI/AutoAgents](https://github.com/Link-AGI/AutoAgents) | 820 |
-|[truera/trulens](https://github.com/truera/trulens) | 794 |
-|[Dataherald/dataherald](https://github.com/Dataherald/dataherald) | 788 |
-|[sunlabuiuc/PyHealth](https://github.com/sunlabuiuc/PyHealth) | 783 |
-|[jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 783 |
-|[pyspark-ai/pyspark-ai](https://github.com/pyspark-ai/pyspark-ai) | 782 |
-|[confident-ai/deepeval](https://github.com/confident-ai/deepeval) | 780 |
-|[billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 777 |
-|[langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 776 |
-|[akshata29/entaoai](https://github.com/akshata29/entaoai) | 771 |
-|[LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 770 |
-|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 768 |
-|[Dicklesworthstone/swiss_army_llama](https://github.com/Dicklesworthstone/swiss_army_llama) | 757 |
-|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 757 |
-|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 754 |
-|[e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 753 |
-|[microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 749 |
-|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 731 |
-|[MiuLab/Taiwan-LLM](https://github.com/MiuLab/Taiwan-LLM) | 716 |
-|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 702 |
-|[Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 692 |
-|[iusztinpaul/hands-on-llms](https://github.com/iusztinpaul/hands-on-llms) | 687 |
-|[safevideo/autollm](https://github.com/safevideo/autollm) | 682 |
-|[OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 669 |
-|[NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 663 |
-|[AILab-CVC/GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | 662 |
-|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 657 |
-|[yvann-ba/Robby-chatbot](https://github.com/yvann-ba/Robby-chatbot) | 639 |
-|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 635 |
-|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 630 |
-|[microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 621 |
-|[aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 616 |
-|[NeumTry/NeumAI](https://github.com/NeumTry/NeumAI) | 605 |
-|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 599 |
-|[plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 595 |
-|[marimo-team/marimo](https://github.com/marimo-team/marimo) | 591 |
-|[yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 586 |
-|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 584 |
-|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 573 |
-|[dgarnitz/vectorflow](https://github.com/dgarnitz/vectorflow) | 568 |
-|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 564 |
-|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 563 |
-|[traceloop/openllmetry](https://github.com/traceloop/openllmetry) | 559 |
-|[Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 546 |
-|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 545 |
-|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 544 |
-|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 533 |
-|[marella/chatdocs](https://github.com/marella/chatdocs) | 532 |
-|[opentensor/bittensor](https://github.com/opentensor/bittensor) | 532 |
-|[DjangoPeng/openai-quickstart](https://github.com/DjangoPeng/openai-quickstart) | 527 |
-|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 517 |
-|[sidhq/Multi-GPT](https://github.com/sidhq/Multi-GPT) | 515 |
-|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |
-|[sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |
-|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |
-|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |
-|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |
-|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |
-|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 483 |
-|[datawhalechina/llm-universe](https://github.com/datawhalechina/llm-universe) | 475 |
-|[leondz/garak](https://github.com/leondz/garak) | 464 |
-|[RedisVentures/ArXivChatGuru](https://github.com/RedisVentures/ArXivChatGuru) | 461 |
-|[Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 455 |
-|[Aiyu-awa/luna-ai](https://github.com/Aiyu-awa/luna-ai) | 450 |
-|[DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 450 |
-|[Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 449 |
-|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 447 |
-|[onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 446 |
-|[junruxiong/IncarnaMind](https://github.com/junruxiong/IncarnaMind) | 441 |
-|[CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 441 |
-|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 437 |
-|[showlab/VLog](https://github.com/showlab/VLog) | 436 |
-|[wandb/weave](https://github.com/wandb/weave) | 420 |
-|[QwenLM/Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) | 419 |
-|[huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 416 |
-|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 411 |
-|[monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 408 |
-|[mallorbc/Finetune_LLMs](https://github.com/mallorbc/Finetune_LLMs) | 406 |
-|[JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 405 |
-|[rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 401 |
-|[langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 398 |
-|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 398 |
-|[morpheuslord/GPT_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 391 |
-|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 387 |
-|[JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 384 |
-|[mrwadams/attackgen](https://github.com/mrwadams/attackgen) | 381 |
-|[codefuse-ai/Test-Agent](https://github.com/codefuse-ai/Test-Agent) | 380 |
-|[personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 379 |
-|[mosaicml/examples](https://github.com/mosaicml/examples) | 378 |
-|[steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 370 |
-|[FlagAI-Open/Aquila2](https://github.com/FlagAI-Open/Aquila2) | 365 |
-|[Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 365 |
-|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 357 |
-|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 354 |
-|[lilacai/lilac](https://github.com/lilacai/lilac) | 352 |
-|[preset-io/promptimize](https://github.com/preset-io/promptimize) | 351 |
-|[yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 347 |
-|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 346 |
-|[zhoudaquan/ChatAnything](https://github.com/zhoudaquan/ChatAnything) | 343 |
-|[rgomezcasas/dotfiles](https://github.com/rgomezcasas/dotfiles) | 343 |
-|[tigerlab-ai/tiger](https://github.com/tigerlab-ai/tiger) | 342 |
-|[HumanSignal/label-studio-ml-backend](https://github.com/HumanSignal/label-studio-ml-backend) | 334 |
-|[nasa-petal/bidara](https://github.com/nasa-petal/bidara) | 334 |
-|[momegas/megabots](https://github.com/momegas/megabots) | 334 |
-|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 330 |
-|[CambioML/pykoi](https://github.com/CambioML/pykoi) | 326 |
-|[Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 326 |
-|[wandb/edu](https://github.com/wandb/edu) | 326 |
-|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 324 |
-|[sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 322 |
-|[liangwq/Chatglm_lora_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 321 |
-|[ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 320 |
-|[itamargol/openai](https://github.com/itamargol/openai) | 318 |
-|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 304 |
-|[SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 302 |
-|[facebookresearch/personal-timeline](https://github.com/facebookresearch/personal-timeline) | 302 |
-|[hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 301 |
-|[Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 300 |
-|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 300 |
-|[GPT-Fathom/GPT-Fathom](https://github.com/GPT-Fathom/GPT-Fathom) | 299 |
-|[kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 299 |
-|[kyegomez/swarms](https://github.com/kyegomez/swarms) | 296 |
-|[LangStream/langstream](https://github.com/LangStream/langstream) | 295 |
-|[genia-dev/GeniA](https://github.com/genia-dev/GeniA) | 294 |
-|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 291 |
-|[TsinghuaDatabaseGroup/DB-GPT](https://github.com/TsinghuaDatabaseGroup/DB-GPT) | 290 |
-|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 283 |
-|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 283 |
-|[AutoPackAI/beebot](https://github.com/AutoPackAI/beebot) | 282 |
-|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 282 |
-|[gkamradt/LLMTest_NeedleInAHaystack](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) | 280 |
-|[gustavz/DataChad](https://github.com/gustavz/DataChad) | 280 |
-|[Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 278 |
-|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 275 |
-|[AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 268 |
-|[ennucore/clippinator](https://github.com/ennucore/clippinator) | 267 |
-|[artitw/text2text](https://github.com/artitw/text2text) | 264 |
-|[anarchy-ai/LLM-VM](https://github.com/anarchy-ai/LLM-VM) | 263 |
-|[wpydcr/LLM-Kit](https://github.com/wpydcr/LLM-Kit) | 262 |
-|[streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 262 |
-|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 262 |
-|[yym68686/ChatGPT-Telegram-Bot](https://github.com/yym68686/ChatGPT-Telegram-Bot) | 261 |
-|[PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 259 |
-|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 259 |
-|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 259 |
-|[ml6team/fondant](https://github.com/ml6team/fondant) | 254 |
-|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 254 |
-|[rahulnyk/knowledge_graph](https://github.com/rahulnyk/knowledge_graph) | 253 |
-|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 248 |
-|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 248 |
-|[fetchai/uAgents](https://github.com/fetchai/uAgents) | 247 |
-|[arthur-ai/bench](https://github.com/arthur-ai/bench) | 247 |
-|[miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 246 |
-|[RoboCoachTechnologies/GPT-Synthesizer](https://github.com/RoboCoachTechnologies/GPT-Synthesizer) | 244 |
-|[langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 242 |
-|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 242 |
-|[PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 241 |
-|[stepanogil/autonomous-hr-chatbot](https://github.com/stepanogil/autonomous-hr-chatbot) | 238 |
-|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 236 |
-|[nexus-stc/stc](https://github.com/nexus-stc/stc) | 235 |
-|[yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 235 |
-|[Gentopia-AI/Gentopia](https://github.com/Gentopia-AI/Gentopia) | 235 |
-|[alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 235 |
-|[grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 232 |
-|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 232 |
-|[darrenburns/elia](https://github.com/darrenburns/elia) | 231 |
-|[orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 231 |
-|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 226 |
-|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 225 |
-|[nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 225 |
-|[dbpunk-labs/octogen](https://github.com/dbpunk-labs/octogen) | 224 |
-|[langchain-ai/weblangchain](https://github.com/langchain-ai/weblangchain) | 222 |
-|[CL-lau/SQL-GPT](https://github.com/CL-lau/SQL-GPT) | 222 |
-|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |
-|[showlab/UniVTG](https://github.com/showlab/UniVTG) | 220 |
-|[edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 219 |
-|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 216 |
-|[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 215 |
-|[Azure-Samples/chat-with-your-data-solution-accelerator](https://github.com/Azure-Samples/chat-with-your-data-solution-accelerator) | 214 |
-|[amadad/agentcy](https://github.com/amadad/agentcy) | 213 |
-|[snexus/llm-search](https://github.com/snexus/llm-search) | 212 |
-|[afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 206 |
-|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 205 |
-|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 205 |
-|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 205 |
-|[voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 204 |
-|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 204 |
-|[emarco177/ice_breaker](https://github.com/emarco177/ice_breaker) | 204 |
-|[tencentmusic/supersonic](https://github.com/tencentmusic/supersonic) | 202 |
-|[Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 202 |
-|[blob42/Instrukt](https://github.com/blob42/Instrukt) | 201 |
-|[langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk) | 200 |
-|[SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 200 |
-|[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 198 |
-|[KMnO4-zx/huanhuan-chat](https://github.com/KMnO4-zx/huanhuan-chat) | 196 |
-|[Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 192 |
-|[hongbo-miao/hongbomiao.com](https://github.com/hongbo-miao/hongbomiao.com) | 190 |
-|[CakeCrusher/openplugin](https://github.com/CakeCrusher/openplugin) | 190 |
-|[PaddlePaddle/ERNIE-Bot-SDK](https://github.com/PaddlePaddle/ERNIE-Bot-SDK) | 189 |
-|[retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 189 |
-|[AmineDiro/cria](https://github.com/AmineDiro/cria) | 187 |
-|[lancedb/vectordb-recipes](https://github.com/lancedb/vectordb-recipes) | 186 |
-|[vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 185 |
-|[aws-ia/ecs-blueprints](https://github.com/aws-ia/ecs-blueprints) | 184 |
-|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 183 |
-|[MuhammadMoinFaisal/LargeLanguageModelsProjects](https://github.com/MuhammadMoinFaisal/LargeLanguageModelsProjects) | 182 |
-|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 181 |
-|[summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 180 |
-|[NomaDamas/RAGchain](https://github.com/NomaDamas/RAGchain) | 179 |
-|[pnkvalavala/repochat](https://github.com/pnkvalavala/repochat) | 179 |
-|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 177 |
-|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 177 |
-|[langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 175 |
-|[iMagist486/ElasticSearch-Langchain-Chatglm2](https://github.com/iMagist486/ElasticSearch-Langchain-Chatglm2) | 175 |
-|[limaoyi1/Auto-PPT](https://github.com/limaoyi1/Auto-PPT) | 175 |
-|[Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 175 |
-|[morpheuslord/HackBot](https://github.com/morpheuslord/HackBot) | 174 |
-|[v7labs/benchllm](https://github.com/v7labs/benchllm) | 174 |
-|[Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 174 |
-|[dongyh20/Octopus](https://github.com/dongyh20/Octopus) | 173 |
-|[kimtth/azure-openai-llm-vector-langchain](https://github.com/kimtth/azure-openai-llm-vector-langchain) | 173 |
-|[mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 173 |
-|[zilliztech/akcio](https://github.com/zilliztech/akcio) | 172 |
-|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 172 |
-|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 172 |
-|[joaomdmoura/CrewAI](https://github.com/joaomdmoura/CrewAI) | 170 |
-|[katanaml/llm-mistral-invoice-cpu](https://github.com/katanaml/llm-mistral-invoice-cpu) | 170 |
-|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 170 |
-|[mudler/LocalAGI](https://github.com/mudler/LocalAGI) | 167 |
-|[dssjon/biblos](https://github.com/dssjon/biblos) | 165 |
-|[kjappelbaum/gptchem](https://github.com/kjappelbaum/gptchem) | 165 |
-|[xxw1995/chatglm3-finetune](https://github.com/xxw1995/chatglm3-finetune) | 164 |
-|[ArjanCodes/examples](https://github.com/ArjanCodes/examples) | 163 |
-|[AIAnytime/Llama2-Medical-Chatbot](https://github.com/AIAnytime/Llama2-Medical-Chatbot) | 163 |
-|[RCGAI/SimplyRetrieve](https://github.com/RCGAI/SimplyRetrieve) | 162 |
-|[langchain-ai/langchain-teacher](https://github.com/langchain-ai/langchain-teacher) | 162 |
-|[menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 162 |
-|[flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 162 |
-|[homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 161 |
-|[jiran214/langup-ai](https://github.com/jiran214/langup-ai) | 160 |
-|[JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 160 |
-|[GoogleCloudPlatform/data-analytics-golden-demo](https://github.com/GoogleCloudPlatform/data-analytics-golden-demo) | 159 |
-|[positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 159 |
-|[luisroque/large_laguage_models](https://github.com/luisroque/large_laguage_models) | 159 |
-|[mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 158 |
-|[wandb/wandbot](https://github.com/wandb/wandbot) | 158 |
-|[elastic/elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) | 157 |
-|[shroominic/funcchain](https://github.com/shroominic/funcchain) | 157 |
-|[deeppavlov/dream](https://github.com/deeppavlov/dream) | 156 |
-|[mluogh/eastworld](https://github.com/mluogh/eastworld) | 154 |
-|[georgesung/llm_qlora](https://github.com/georgesung/llm_qlora) | 154 |
-|[RUC-GSAI/YuLan-Rec](https://github.com/RUC-GSAI/YuLan-Rec) | 153 |
-|[KylinC/ChatFinance](https://github.com/KylinC/ChatFinance) | 152 |
-|[Dicklesworthstone/llama2_aided_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 152 |
-|[c0sogi/LLMChat](https://github.com/c0sogi/LLMChat) | 152 |
-|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 152 |
-|[ErikBjare/gptme](https://github.com/ErikBjare/gptme) | 152 |
-|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 152 |
-|[RoboCoachTechnologies/ROScribe](https://github.com/RoboCoachTechnologies/ROScribe) | 151 |
-|[Aggregate-Intellect/sherpa](https://github.com/Aggregate-Intellect/sherpa) | 151 |
-|[3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 151 |
-|[tangqiaoyu/ToolAlpaca](https://github.com/tangqiaoyu/ToolAlpaca) | 150 |
-|[kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 150 |
-|[mallahyari/drqa](https://github.com/mallahyari/drqa) | 150 |
-|[MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 149 |
-|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |
-|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |
-|[ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |
-|[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |
-|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |
-|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |
-|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 146 |
-|[trancethehuman/entities-extraction-web-scraper](https://github.com/trancethehuman/entities-extraction-web-scraper) | 144 |
-|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 144 |
-|[grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 144 |
-|[gh18l/CrawlGPT](https://github.com/gh18l/CrawlGPT) | 142 |
-|[langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 142 |
-|[yasyf/summ](https://github.com/yasyf/summ) | 141 |
-|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 141 |
-|[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |
-|[jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 139 |
-|[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 139 |
-|[jlonge4/local_llama](https://github.com/jlonge4/local_llama) | 139 |
-|[smyja/blackmaria](https://github.com/smyja/blackmaria) | 138 |
-|[ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 137 |
-|[log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 137 |
-|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 137 |
-|[dcaribou/transfermarkt-datasets](https://github.com/dcaribou/transfermarkt-datasets) | 136 |
-|[ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 135 |
-|[Undertone0809/promptulate](https://github.com/Undertone0809/promptulate) | 134 |
-|[fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 134 |
-|[run-llama/ai-engineer-workshop](https://github.com/run-llama/ai-engineer-workshop) | 133 |
-|[definitive-io/code-indexer-loop](https://github.com/definitive-io/code-indexer-loop) | 131 |
-|[mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 131 |
-|[baidubce/bce-qianfan-sdk](https://github.com/baidubce/bce-qianfan-sdk) | 130 |
-|[Ngonie-x/langchain_csv](https://github.com/Ngonie-x/langchain_csv) | 130 |
-|[IvanIsCoding/ResuLLMe](https://github.com/IvanIsCoding/ResuLLMe) | 130 |
-|[AnchoringAI/anchoring-ai](https://github.com/AnchoringAI/anchoring-ai) | 129 |
-|[Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 128 |
-|[athina-ai/athina-sdk](https://github.com/athina-ai/athina-sdk) | 126 |
-|[thunlp/ChatEval](https://github.com/thunlp/ChatEval) | 126 |
-|[prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 126 |
-|[vietanhdev/pautobot](https://github.com/vietanhdev/pautobot) | 125 |
-|[awslabs/generative-ai-cdk-constructs](https://github.com/awslabs/generative-ai-cdk-constructs) | 124 |
-|[sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 124 |
-|[rabbitmetrics/langchain-13-min](https://github.com/rabbitmetrics/langchain-13-min) | 124 |
-|[AutoLLM/AutoAgents](https://github.com/AutoLLM/AutoAgents) | 122 |
-|[nicknochnack/Nopenai](https://github.com/nicknochnack/Nopenai) | 122 |
-|[wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 122 |
-|[dotvignesh/PDFChat](https://github.com/dotvignesh/PDFChat) | 122 |
-|[topoteretes/PromethAI-Backend](https://github.com/topoteretes/PromethAI-Backend) | 121 |
-|[nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 121 |
-|[vishwasg217/finsight](https://github.com/vishwasg217/finsight) | 120 |
-|[snap-stanford/MLAgentBench](https://github.com/snap-stanford/MLAgentBench) | 120 |
-|[Azure/app-service-linux-docs](https://github.com/Azure/app-service-linux-docs) | 120 |
-|[nyanp/chat2plot](https://github.com/nyanp/chat2plot) | 120 |
-|[ant4g0nist/polar](https://github.com/ant4g0nist/polar) | 119 |
-|[aws-samples/cdk-eks-blueprints-patterns](https://github.com/aws-samples/cdk-eks-blueprints-patterns) | 119 |
-|[aws-samples/amazon-kendra-langchain-extensions](https://github.com/aws-samples/amazon-kendra-langchain-extensions) | 119 |
-|[Xueheng-Li/SynologyChatbotGPT](https://github.com/Xueheng-Li/SynologyChatbotGPT) | 119 |
-|[CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 117 |
-|[Lin-jun-xiang/docGPT-langchain](https://github.com/Lin-jun-xiang/docGPT-langchain) | 117 |
-|[ademakdogan/ChatSQL](https://github.com/ademakdogan/ChatSQL) | 116 |
-|[aniketmaurya/llm-inference](https://github.com/aniketmaurya/llm-inference) | 115 |
-|[xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 115 |
-|[cmooredev/RepoReader](https://github.com/cmooredev/RepoReader) | 115 |
-|[abi/autocommit](https://github.com/abi/autocommit) | 115 |
-|[MIDORIBIN/langchain-gpt4free](https://github.com/MIDORIBIN/langchain-gpt4free) | 114 |
-|[finaldie/auto-news](https://github.com/finaldie/auto-news) | 114 |
-|[Anil-matcha/Youtube-to-chatbot](https://github.com/Anil-matcha/Youtube-to-chatbot) | 114 |
-|[avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot) | 114 |
-|[Capsize-Games/airunner](https://github.com/Capsize-Games/airunner) | 113 |
-|[atisharma/llama_farm](https://github.com/atisharma/llama_farm) | 113 |
-|[mbchang/data-driven-characters](https://github.com/mbchang/data-driven-characters) | 112 |
-|[fiddler-labs/fiddler-auditor](https://github.com/fiddler-labs/fiddler-auditor) | 112 |
-|[dirkjbreeuwer/gpt-automated-web-scraper](https://github.com/dirkjbreeuwer/gpt-automated-web-scraper) | 111 |
-|[Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding](https://github.com/Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding) | 111 |
-|[hwchase17/langchain-gradio-template](https://github.com/hwchase17/langchain-gradio-template) | 111 |
-|[artas728/spelltest](https://github.com/artas728/spelltest) | 110 |
-|[NVIDIA/GenerativeAIExamples](https://github.com/NVIDIA/GenerativeAIExamples) | 109 |
-|[Azure/aistudio-copilot-sample](https://github.com/Azure/aistudio-copilot-sample) | 108 |
-|[codefuse-ai/codefuse-chatbot](https://github.com/codefuse-ai/codefuse-chatbot) | 108 |
-|[apirrone/Memento](https://github.com/apirrone/Memento) | 108 |
-|[e-johnstonn/GPT-Doc-Summarizer](https://github.com/e-johnstonn/GPT-Doc-Summarizer) | 108 |
-|[salesforce/BOLAA](https://github.com/salesforce/BOLAA) | 107 |
-|[Erol444/gpt4-openai-api](https://github.com/Erol444/gpt4-openai-api) | 106 |
-|[linjungz/chat-with-your-doc](https://github.com/linjungz/chat-with-your-doc) | 106 |
-|[crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 106 |
-|[panaverse/learn-generative-ai](https://github.com/panaverse/learn-generative-ai) | 105 |
-|[Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |
-|[malywut/gpt_examples](https://github.com/malywut/gpt_examples) | 105 |
-|[ritun16/chain-of-verification](https://github.com/ritun16/chain-of-verification) | 104 |
-|[langchain-ai/langchain-benchmarks](https://github.com/langchain-ai/langchain-benchmarks) | 104 |
-|[lightninglabs/LangChainBitcoin](https://github.com/lightninglabs/LangChainBitcoin) | 104 |
-|[flepied/second-brain-agent](https://github.com/flepied/second-brain-agent) | 103 |
-|[llmapp/openai.mini](https://github.com/llmapp/openai.mini) | 102 |
-|[gimlet-ai/tddGPT](https://github.com/gimlet-ai/tddGPT) | 102 |
-|[jlonge4/gpt_chatwithPDF](https://github.com/jlonge4/gpt_chatwithPDF) | 102 |
-|[agentification/RAFA_code](https://github.com/agentification/RAFA_code) | 101 |
-|[pacman100/DHS-LLM-Workshop](https://github.com/pacman100/DHS-LLM-Workshop) | 101 |
-|[aws-samples/private-llm-qa-bot](https://github.com/aws-samples/private-llm-qa-bot) | 101 |
-
-
-_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)_
-
-`github-dependents-info --repo "langchain-ai/langchain" --markdownfile dependents.md --minstars 100 --sort stars`
diff --git a/langchain_md_files/additional_resources/tutorials.mdx b/langchain_md_files/additional_resources/tutorials.mdx
deleted file mode 100644
index 871b12e40812f9ffd2f732241639cf1b0e2c73db..0000000000000000000000000000000000000000
--- a/langchain_md_files/additional_resources/tutorials.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
-# 3rd Party Tutorials
-
-##  Tutorials
-
-### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
-### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
-### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
-### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
-### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
-### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
-### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
-### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
-### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
-### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)
-### [by Total Technology Zonne](https://youtube.com/playlist?list=PLI8raxzYtfGyE02fAxiM1CPhLUuqcTLWg&si=fkAye16rQKBJVHc9)
-
-## Courses
-
-### Featured courses on Deeplearning.AI
-
-- [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)
-- [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)
-- [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)
-- [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)
-
-### Online courses
-
-- [Udemy](https://www.udemy.com/courses/search/?q=langchain)
-- [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)
-- [Pluralsight](https://www.pluralsight.com/search?q=langchain)
-- [Coursera](https://www.coursera.org/search?query=langchain)
-- [Maven](https://maven.com/courses?query=langchain)
-- [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
-- [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
-- [edX](https://www.edx.org/search?q=langchain)
-- [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)
-
-## Short Tutorials
-
-- [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)
-- [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)
-- [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)
-- [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)
-
-## Books and Handbooks
-
-- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
-- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
-- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
-- [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)
-
----------------------
diff --git a/langchain_md_files/additional_resources/youtube.mdx b/langchain_md_files/additional_resources/youtube.mdx
deleted file mode 100644
index cf694573f0631765149c3ffb59bddae59b5fe779..0000000000000000000000000000000000000000
--- a/langchain_md_files/additional_resources/youtube.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
-# YouTube videos
-
-[Updated 2024-05-16]
-
-### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)
-
-### [Tutorials on YouTube](/docs/additional_resources/tutorials/#tutorials)
-
-## Videos (sorted by views)
-
-Only videos with 40K+ views:
-
-- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)
-- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)
-- [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)
-- [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)
-- [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)
-- [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)
-- [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)
-- [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)
-- [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)
-- [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)
-- [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)
-- [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)
-- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)
-- [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)
-- [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)
-- [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)
-- [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)
-- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)
-- [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)
-- [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)
-- [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)
-- [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)
-- [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)
-- [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)
-- [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)
-- [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)
-- [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)
-- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)
-- [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)
-- [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)
-- [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)
-- [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)
-- [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)
-- [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)
-- [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)
-- [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)
-- [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)
-- [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)
-- [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)
-- [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)
-- [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)
-- [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)
-- [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)
-- [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)
-- [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)
-- [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)
-- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)
-- [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)
-
----------------------
-[Updated 2024-05-16]
diff --git a/langchain_md_files/changes/changelog/core.mdx b/langchain_md_files/changes/changelog/core.mdx
deleted file mode 100644
index 63c9c3f8c800ccba399dea6052bf5babeb8f7c16..0000000000000000000000000000000000000000
--- a/langchain_md_files/changes/changelog/core.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-# langchain-core
-
-## 0.1.x
-
-#### Deprecated
-
-- `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead.
-- `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead.
-- `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
-- `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead.
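-
-The migration pattern is the same for all of these: replace the deprecated call with `invoke` (or `ainvoke`). A minimal sketch, assuming the `langchain-openai` integration package is installed and an API key is configured (the model name is illustrative):
-
-```python
-from langchain_openai import ChatOpenAI  # assumed provider
-
-model = ChatOpenAI(model="gpt-4o-mini")
-
-# Deprecated, removed in 0.2.0:
-# model.predict("Tell me a joke")
-
-# Preferred:
-response = model.invoke("Tell me a joke")
-print(response.content)
-```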
\ No newline at end of file
diff --git a/langchain_md_files/changes/changelog/langchain.mdx b/langchain_md_files/changes/changelog/langchain.mdx
deleted file mode 100644
index 04a7d8d9dcdf8cd448dd34e05622cb0d02629443..0000000000000000000000000000000000000000
--- a/langchain_md_files/changes/changelog/langchain.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
-# langchain
-
-## 0.2.0
-
-### Deleted
-
-As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not, by default, instantiate any specific chat models, LLMs, embedding models, vector stores, etc.; instead, the user is required to specify those explicitly.
-
-The following functions and classes require an explicit LLM to be passed as an argument:
-
-- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
-- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
-- `langchain.chains.openai_functions.get_openapi_chain`
-- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
-- `langchain.indexes.VectorStoreIndexWrapper.query`
-- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
-- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
-- `langchain.chains.flare.FlareChain`
-
-The following classes now require passing an explicit Embedding model as an argument:
-
-- `langchain.indexes.VectorstoreIndexCreator`
-
-The following code has been removed:
-
-- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
-
-### Deprecated
-
-We have two main types of deprecations:
-
-1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
-
-If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
-
-```shell
-python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
-```
-
-```text
-LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
-
->> from langchain.document_loaders import UnstructuredMarkdownLoader
-
-with new imports of:
-
->> from langchain_community.document_loaders import UnstructuredMarkdownLoader
-```
-
-We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed (e.g., as long as `langchain_community` is installed).
-
-However, we advise users not to rely on these imports and to migrate to the new imports instead. To help with this process, we’re releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
-
-2. Code that has better alternatives available and will eventually be removed, so there’s only a single way to do things (e.g., the `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
-
-Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
-
-
-## 0.1.0 (Jan 5, 2024)
-
-### Deleted
-
-No deletions.
-
-### Deprecated
-
-Deprecated classes and methods will be removed in 0.2.0
-
-| Deprecated                      | Alternative                       | Reason                                         |
-|---------------------------------|-----------------------------------|------------------------------------------------|
-| ChatVectorDBChain               | ConversationalRetrievalChain      | More general to all retrievers                 |
-| create_ernie_fn_chain           | create_ernie_fn_runnable          | Use LCEL under the hood                        |
-| created_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood                        |
-| NatBotChain                     |                                   | Not used                                       |
-| create_openai_fn_chain          | create_openai_fn_runnable         | Use LCEL under the hood                        |
-| create_structured_output_chain  | create_structured_output_runnable | Use LCEL under the hood                        |
-| load_query_constructor_chain    | load_query_constructor_runnable   | Use LCEL under the hood                        |
-| VectorDBQA                      | RetrievalQA                       | More general to all retrievers                 |
-| SequentialChain                 | LCEL                              | Obviated by LCEL                               |
-| SimpleSequentialChain           | LCEL                              | Obviated by LCEL                               |
-| TransformChain                  | LCEL/RunnableLambda               | Obviated by LCEL                               |
-| create_tagging_chain            | create_structured_output_runnable | Use LCEL under the hood                        |
-| ChatAgent                       | create_react_agent                | Use LCEL builder over a class                  |
-| ConversationalAgent             | create_react_agent                | Use LCEL builder over a class                  |
-| ConversationalChatAgent         | create_json_chat_agent            | Use LCEL builder over a class                  |
-| initialize_agent                | Individual create agent methods   | Individual create agent methods are more clear |
-| ZeroShotAgent                   | create_react_agent                | Use LCEL builder over a class                  |
-| OpenAIFunctionsAgent            | create_openai_functions_agent     | Use LCEL builder over a class                  |
-| OpenAIMultiFunctionsAgent       | create_openai_tools_agent         | Use LCEL builder over a class                  |
-| SelfAskWithSearchAgent          | create_self_ask_with_search       | Use LCEL builder over a class                  |
-| StructuredChatAgent             | create_structured_chat_agent      | Use LCEL builder over a class                  |
-| XMLAgent                        | create_xml_agent                  | Use LCEL builder over a class                  |
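-
-To illustrate the "Use LCEL builder over a class" pattern from the table above, the legacy `ZeroShotAgent`/`initialize_agent` style is replaced by an explicit builder such as `create_react_agent`. A minimal sketch, assuming the `langchain-openai` integration and the public `hwchase17/react` hub prompt (the model and tool are illustrative):
-
-```python
-from langchain import hub
-from langchain.agents import AgentExecutor, create_react_agent
-from langchain_core.tools import tool
-from langchain_openai import ChatOpenAI  # assumed provider
-
-@tool
-def add(a: int, b: int) -> int:
-    """Add two integers."""
-    return a + b
-
-llm = ChatOpenAI(model="gpt-4o-mini")
-prompt = hub.pull("hwchase17/react")             # standard ReAct prompt
-agent = create_react_agent(llm, [add], prompt)   # the builder returns a Runnable
-executor = AgentExecutor(agent=agent, tools=[add])
-executor.invoke({"input": "What is 2 + 2?"})
-```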
\ No newline at end of file
diff --git a/langchain_md_files/concepts/agents.mdx b/langchain_md_files/concepts/agents.mdx
deleted file mode 100644
index 960eb2a975d1e46aaf49147cd8eff3747665089f..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/agents.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Agents
-
-By themselves, language models can't take actions - they just output text. Agents are systems that take a high-level task and use an LLM as a reasoning engine to decide what actions to take and execute those actions.
-
-[LangGraph](/docs/concepts/architecture#langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents. We recommend that you use LangGraph for building agents.
-
-Please see the following resources for more information:
-
-* LangGraph docs on [common agent architectures](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/)
-* [Pre-built agents in LangGraph](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.chat_agent_executor.create_react_agent)
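-
-As a quick illustration of the prebuilt option, a minimal tool-calling agent can be assembled in a few lines (the chat model and tool below are illustrative assumptions, not part of this guide):
-
-```python
-from langchain_core.tools import tool
-from langchain_openai import ChatOpenAI  # assumed provider
-from langgraph.prebuilt import create_react_agent
-
-@tool
-def get_weather(city: str) -> str:
-    """Return the weather for a city."""
-    return f"It is always sunny in {city}."
-
-agent = create_react_agent(ChatOpenAI(model="gpt-4o-mini"), [get_weather])
-result = agent.invoke({"messages": [("user", "What's the weather in Paris?")]})
-```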
-
-## Legacy agent concept: AgentExecutor
-
-LangChain previously introduced the `AgentExecutor` as a runtime for agents. 
-While it served as an excellent starting point, its limitations became apparent when dealing with more sophisticated and customized agents. 
-As a result, we're gradually phasing out `AgentExecutor` in favor of more flexible solutions in LangGraph.
-
-### Transitioning from AgentExecutor to LangGraph
-
-If you're currently using `AgentExecutor`, don't worry! We've prepared resources to help you:
-
-1. For those who still need to use `AgentExecutor`, we offer a comprehensive guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
-
-2. However, we strongly recommend transitioning to LangGraph for improved flexibility and control. To facilitate this transition, we've created a detailed [migration guide](/docs/how_to/migrate_agent) to help you move from `AgentExecutor` to LangGraph seamlessly.
-
diff --git a/langchain_md_files/concepts/architecture.mdx b/langchain_md_files/concepts/architecture.mdx
deleted file mode 100644
index 66272190080b91bc5b6f4ee8c2647a2197f8caa8..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/architecture.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
-import ThemedImage from '@theme/ThemedImage';
-import useBaseUrl from '@docusaurus/useBaseUrl';
-
-# Architecture
-
-LangChain is a framework that consists of a number of packages.
-
-<ThemedImage
-    alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
-    sources={{
-        light: useBaseUrl('/svg/langchain_stack_112024.svg'),
-        dark: useBaseUrl('/svg/langchain_stack_112024_dark.svg'),
-    }}
-    title="LangChain Framework Overview"
-    style={{ width: "100%" }}
-/>
-
-
-## langchain-core
-
-This package contains base abstractions for different components and ways to compose them together.
-The interfaces for core components like chat models, vector stores, tools and more are defined here.
-No third-party integrations are defined here.
-The dependencies are very lightweight.
-
-## langchain
-
-The main `langchain` package contains chains and retrieval strategies that make up an application's cognitive architecture.
-These are NOT third-party integrations.
-All chains, agents, and retrieval strategies here are NOT specific to any one integration, but rather generic across all integrations.
-
-## Integration packages
-
-Popular integrations have their own packages (e.g. `langchain-openai`, `langchain-anthropic`, etc) so that they can be properly versioned and appropriately lightweight.
-
-For more information see:
-
-* A list of [integration packages](/docs/integrations/providers/)
-* The [API Reference](https://python.langchain.com/api_reference/), where you can find detailed information about each integration package.
-
-## langchain-community
-
-This package contains third-party integrations that are maintained by the LangChain community.
-Key integration packages are separated out (see above).
-This contains integrations for various components (chat models, vector stores, tools, etc).
-All dependencies in this package are optional to keep the package as lightweight as possible.
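-
-In practice, the difference between these layers shows up in imports. A brief sketch, assuming the `langchain-openai` and `langchain-community` packages are installed (the class choices are illustrative):
-
-```python
-# Dedicated integration package: versioned separately, lightweight dependencies
-from langchain_openai import ChatOpenAI
-
-# Community package: many integrations, all third-party dependencies optional
-from langchain_community.chat_models import ChatOllama
-
-# Core abstractions that both of the above implement
-from langchain_core.language_models import BaseChatModel
-```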
-
-## langgraph
-
-`langgraph` is an extension of `langchain` aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
-
-LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows.
-
-:::info[Further reading]
-
-* See our LangGraph overview [here](https://langchain-ai.github.io/langgraph/concepts/high_level/#core-principles).
-* See our LangGraph Academy Course [here](https://academy.langchain.com/courses/intro-to-langgraph).
-
-:::
-
-## langserve
-
-A package to deploy LangChain chains as REST APIs. It makes it easy to get a production-ready API up and running.
-
-:::important
-LangServe is designed to primarily deploy simple Runnables and work with well-known primitives in langchain-core.
-
-If you need a deployment option for LangGraph, you should instead be looking at LangGraph Platform (beta) which will be better suited for deploying LangGraph applications.
-:::
-
-For more information, see the [LangServe documentation](/docs/langserve).
-
-
-## LangSmith
-
-A developer platform that lets you debug, test, evaluate, and monitor LLM applications.
-
-For more information, see the [LangSmith documentation](https://docs.smith.langchain.com)
diff --git a/langchain_md_files/concepts/async.mdx b/langchain_md_files/concepts/async.mdx
deleted file mode 100644
index d81c706db243bd0362fb512f035ddc11e67309a3..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/async.mdx
+++ /dev/null
@@ -1,81 +0,0 @@
-# Async programming with LangChain
-
-:::info Prerequisites
-* [Runnable interface](/docs/concepts/runnables)
-* [asyncio](https://docs.python.org/3/library/asyncio.html)
-:::
-
-LLM based applications often involve a lot of I/O-bound operations, such as making API calls to language models, databases, or other services. Asynchronous programming (or async programming) is a paradigm that allows a program to perform multiple tasks concurrently without blocking the execution of other tasks, improving efficiency and responsiveness, particularly in I/O-bound operations.
-
-:::note
-You are expected to be familiar with asynchronous programming in Python before reading this guide. If you are not, please find appropriate resources online to learn how to program asynchronously in Python.
-This guide specifically focuses on what you need to know to work with LangChain in an asynchronous context, assuming that you are already familiar with asynchronous programming in Python.
-:::
-
-## LangChain asynchronous APIs
-
-Many LangChain APIs are designed to be asynchronous, allowing you to build efficient and responsive applications.
-
-Typically, any method that may perform I/O operations (e.g., making API calls, reading files) will have an asynchronous counterpart.
-
-In LangChain, async implementations are located in the same classes as their synchronous counterparts, with the asynchronous methods having an "a" prefix. For example, the synchronous `invoke` method has an asynchronous counterpart called `ainvoke`.
-
-Many components of LangChain implement the [Runnable Interface](/docs/concepts/runnables), which includes support for asynchronous execution. This means that you can run Runnables asynchronously using the `await` keyword in Python.
-
-```python
-await some_runnable.ainvoke(some_input)
-```
-
-Other components like [Embedding Models](/docs/concepts/embedding_models) and [VectorStore](/docs/concepts/vectorstores) that do not implement the [Runnable Interface](/docs/concepts/runnables) usually still follow the same rule and include the asynchronous version of the method in the same class with an "a" prefix.
-
-For example,
-
-```python
-await some_vectorstore.aadd_documents(documents)
-```
-
-Runnables created using the [LangChain Expression Language (LCEL)](/docs/concepts/lcel) can also be run asynchronously as they implement
-the full [Runnable Interface](/docs/concepts/runnables).
-
-For more information, please review the [API reference](https://python.langchain.com/api_reference/) for the specific component you are using.
-
-## Delegation to sync methods
-
-Most popular LangChain integrations implement asynchronous support of their APIs. For example, the `ainvoke` method of many ChatModel implementations uses the `httpx.AsyncClient` to make asynchronous HTTP requests to the model provider's API.
-
-When an asynchronous implementation is not available, LangChain tries to provide a default implementation, even if it incurs
-a **slight** overhead.
-
-By default, LangChain will delegate the execution of unimplemented asynchronous methods to the synchronous counterparts. LangChain almost always assumes that the synchronous method should be treated as a blocking operation and should be run in a separate thread.
-This is done using [asyncio.loop.run_in_executor](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) functionality provided by the `asyncio` library. LangChain uses the default executor provided by the `asyncio` library, which lazily initializes a thread pool executor with a default number of threads that is reused in the given event loop. While this strategy incurs a slight overhead due to context switching between threads, it guarantees that every asynchronous method has a default implementation that works out of the box.
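-
-A minimal sketch of this behavior, using a `RunnableLambda` that wraps a plain synchronous function (the function itself is illustrative):
-
-```python
-import asyncio
-
-from langchain_core.runnables import RunnableLambda
-
-def shout(text: str) -> str:
-    # A plain synchronous function; imagine it performs blocking I/O.
-    return text.upper()
-
-runnable = RunnableLambda(shout)
-
-async def main() -> None:
-    # No async implementation was provided, so LangChain delegates to the
-    # sync function by running it in the default thread pool executor.
-    print(await runnable.ainvoke("hello"))
-
-asyncio.run(main())
-```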
-
-## Performance
-
-Async code in LangChain should generally perform relatively well with minimal overhead out of the box, and is unlikely
-to be a bottleneck in most applications.
-
-The two main sources of overhead are:
-
-1. Cost of context switching between threads when [delegating to synchronous methods](#delegation-to-sync-methods). This can be addressed by providing a native asynchronous implementation.
-2. In [LCEL](/docs/concepts/lcel), any "cheap functions" that appear as part of the chain will be either scheduled as tasks on the event loop (if they are async) or run in a separate thread (if they are sync), rather than just being run inline.
-
-The latency overhead you should expect from these ranges from tens of microseconds to a few milliseconds.
-
-A more common source of performance issues arises from users accidentally blocking the event loop by calling synchronous code in an async context (e.g., calling `invoke` rather than `ainvoke`).
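-
-For example, inside a coroutine the asynchronous counterpart should be used so that the event loop is not blocked (a sketch, assuming the `langchain-openai` integration; the model name is illustrative):
-
-```python
-import asyncio
-
-from langchain_openai import ChatOpenAI  # assumed provider
-
-model = ChatOpenAI(model="gpt-4o-mini")
-
-async def respond(prompt: str) -> str:
-    # model.invoke(prompt) here would block the event loop for the whole call;
-    # the async counterpart yields control while waiting on network I/O.
-    message = await model.ainvoke(prompt)
-    return message.content
-
-print(asyncio.run(respond("Hello!")))
-```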
-
-## Compatibility
-
-LangChain is only compatible with the `asyncio` library, which is distributed as part of the Python standard library. It will not work with other async libraries like `trio` or `curio`.
-
-In Python 3.9 and 3.10, [asyncio's tasks](https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task) did not
-accept a `context` parameter. Due to this limitation, LangChain cannot automatically propagate the `RunnableConfig` down the call chain
-in certain scenarios.
-
-If you are experiencing issues with streaming, callbacks or tracing in async code and are using Python 3.9 or 3.10, this is a likely cause.
-
-Please read [Propagation of RunnableConfig](/docs/concepts/runnables/#propagation-of-runnableconfig) to learn how to propagate the `RunnableConfig` down the call chain manually (or upgrade to Python 3.11, where this is no longer an issue).
-
-## How to use in ipython and jupyter notebooks
-
-As of IPython 7.0, IPython supports asynchronous REPLs. This means that you can use the `await` keyword in the IPython REPL and Jupyter Notebooks without any additional setup. For more information, see the [IPython blog post](https://blog.jupyter.org/ipython-7-0-async-repl-a35ce050f7f7).
-
diff --git a/langchain_md_files/concepts/callbacks.mdx b/langchain_md_files/concepts/callbacks.mdx
deleted file mode 100644
index 5a6691baa5a34ce82fc8a2515ddc3ed7467b4688..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/callbacks.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
-# Callbacks
-
-:::note Prerequisites
-- [Runnable interface](/docs/concepts/runnables)
-:::
-
-LangChain provides a callback system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
-
-You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described in more detail below.
-
-## Callback events
-
-| Event            | Event Trigger                               | Associated Method     |
-|------------------|---------------------------------------------|-----------------------|
-| Chat model start | When a chat model starts                    | `on_chat_model_start` |
-| LLM start        | When an LLM starts                          | `on_llm_start`        |
-| LLM new token    | When an LLM or chat model emits a new token | `on_llm_new_token`    |
-| LLM end          | When an LLM or chat model ends              | `on_llm_end`          |
-| LLM error        | When an LLM or chat model errors            | `on_llm_error`        |
-| Chain start      | When a chain starts running                 | `on_chain_start`      |
-| Chain end        | When a chain ends                           | `on_chain_end`        |
-| Chain error      | When a chain errors                         | `on_chain_error`      |
-| Tool start       | When a tool starts running                  | `on_tool_start`       |
-| Tool end         | When a tool ends                            | `on_tool_end`         |
-| Tool error       | When a tool errors                          | `on_tool_error`       |
-| Agent action     | When an agent takes an action               | `on_agent_action`     |
-| Agent finish     | When an agent ends                          | `on_agent_finish`     |
-| Retriever start  | When a retriever starts                     | `on_retriever_start`  |
-| Retriever end    | When a retriever ends                       | `on_retriever_end`    |
-| Retriever error  | When a retriever errors                     | `on_retriever_error`  |
-| Text             | When arbitrary text is run                  | `on_text`             |
-| Retry            | When a retry event is run                   | `on_retry`            |
-
-## Callback handlers
-
-Callback handlers can either be `sync` or `async`:
-
-* Sync callback handlers implement the [BaseCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
-* Async callback handlers implement the [AsyncCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
-
-During run-time, LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)), which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
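-
-A minimal sketch of a custom synchronous handler that implements a few of the events from the table above (what it logs is purely illustrative):
-
-```python
-from langchain_core.callbacks import BaseCallbackHandler
-
-class LoggingHandler(BaseCallbackHandler):
-    """Print a few callback events to stdout."""
-
-    def on_chat_model_start(self, serialized, messages, **kwargs):
-        print("Chat model started")
-
-    def on_llm_new_token(self, token, **kwargs):
-        print(f"New token: {token!r}")
-
-    def on_llm_end(self, response, **kwargs):
-        print("Chat model finished")
-```
-
-Such a handler can then be passed to a runnable via the `callbacks` argument described below.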
-
-## Passing callbacks
-
-The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
-
-- **Request time callbacks**: Passed at the time of the request in addition to the input data.
-Available on all standard `Runnable` objects. These callbacks are INHERITED by all children
-of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`.
-- **Constructor callbacks**: `chain = TheNameOfSomeChain(callbacks=[handler])`. These callbacks
-are passed as arguments to the constructor of the object. The callbacks are scoped
-only to the object they are defined on, and are **not** inherited by any children of the object.
-
-:::warning
-Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children
-of the object.
-:::
-
-If you're creating a custom chain or runnable, you need to remember to propagate request time
-callbacks to any child objects.
-
-:::important Async in Python&lt;=3.10
-
-Any `RunnableLambda`, `RunnableGenerator`, or `Tool` that invokes other runnables
-and is running `async` in Python&lt;=3.10 will have to propagate callbacks to child
-objects manually. This is because LangChain cannot automatically propagate
-callbacks to child objects in this case.
-
-This is a common reason why you may fail to see events being emitted from custom
-runnables or tools.
-:::
-
-For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks).
\ No newline at end of file
diff --git a/langchain_md_files/concepts/chat_history.mdx b/langchain_md_files/concepts/chat_history.mdx
deleted file mode 100644
index 57d22c2735376997f0815aff037f95ef11b9c791..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/chat_history.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
-# Chat history
-
-:::info Prerequisites
-
-- [Messages](/docs/concepts/messages)
-- [Chat models](/docs/concepts/chat_models)
-- [Tool calling](/docs/concepts/tool_calling)
-:::
-
-Chat history is a record of the conversation between the user and the chat model. It is used to maintain context and state throughout the conversation. The chat history is a sequence of [messages](/docs/concepts/messages), each of which is associated with a specific [role](/docs/concepts/messages#role), such as "user", "assistant", "system", or "tool".
-
-## Conversation patterns
-
-![Conversation patterns](/img/conversation_patterns.png)
-
-Most conversations start with a **system message** that sets the context for the conversation. This is followed by a **user message** containing the user's input, and then an **assistant message** containing the model's response.
-
-The **assistant** may respond directly to the user or, if configured with tools, request that a [tool](/docs/concepts/tool_calling) be invoked to perform a specific task.
-
-A full conversation often involves a combination of two patterns of alternating messages:
-
-1. The **user** and the **assistant** representing a back-and-forth conversation.
-2. The **assistant** and **tool messages** representing an ["agentic" workflow](/docs/concepts/agents) where the assistant is invoking tools to perform specific tasks.
-
-## Managing chat history
-
-Since chat models have a maximum limit on input size, it's important to manage chat history and trim it as needed to avoid exceeding the [context window](/docs/concepts/chat_models/#context-window).
-
-While processing chat history, it's essential to preserve a correct conversation structure. 
-
-Key guidelines for managing chat history:
-
-- The conversation should follow one of these structures:
-    - The first message is either a "user" message or a "system" message, followed by a "user" and then an "assistant" message.
-    - The last message should be either a "user" message or a "tool" message containing the result of a tool call.
-- When using [tool calling](/docs/concepts/tool_calling), a "tool" message should only follow an "assistant" message that requested the tool invocation.
-
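-A minimal sketch of trimming a history down to its most recent messages while preserving this structure, using `trim_messages` (the counting strategy here is deliberately naive and treats each message as one unit rather than counting real tokens):
-
-```python
-from langchain_core.messages import (
-    AIMessage,
-    HumanMessage,
-    SystemMessage,
-    trim_messages,
-)
-
-history = [
-    SystemMessage("You are a helpful assistant."),
-    HumanMessage("Hi, I'm Bob."),
-    AIMessage("Hello Bob! How can I help you?"),
-    HumanMessage("What's my name?"),
-]
-
-trimmed = trim_messages(
-    history,
-    max_tokens=2,          # budget of two "tokens" (here: two messages)
-    token_counter=len,     # naive counter: one per message
-    strategy="last",       # keep the most recent messages
-    include_system=True,   # keep the system message if present
-    start_on="human",      # ensure the kept history starts with a user message
-)
-```
-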
-:::tip
-Understanding correct conversation structure is essential for being able to properly implement
-[memory](https://langchain-ai.github.io/langgraph/concepts/memory/) in chat models.
-:::
-
-## Related resources
-
-- [How to trim messages](/docs/how_to/trim_messages/)
-- [Memory guide](https://langchain-ai.github.io/langgraph/concepts/memory/) for information on implementing short-term and long-term memory in chat models using [LangGraph](https://langchain-ai.github.io/langgraph/).
diff --git a/langchain_md_files/concepts/chat_models.mdx b/langchain_md_files/concepts/chat_models.mdx
deleted file mode 100644
index 03133a253e582a587f4578b114e83486fb1f7bab..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/chat_models.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
-# Chat models
-
-## Overview
-
-Large Language Models (LLMs) are advanced machine learning models that excel in a wide range of language-related tasks such as text generation, translation, summarization, question answering, and more, without needing task-specific fine tuning for every scenario.
-
-Modern LLMs are typically accessed through a chat model interface that takes a list of [messages](/docs/concepts/messages) as input and returns a [message](/docs/concepts/messages) as output.
-
-The newest generation of chat models offer additional capabilities:
-
-* [Tool calling](/docs/concepts/tool_calling): Many popular chat models offer a native [tool calling](/docs/concepts/tool_calling) API. This API allows developers to build rich applications that enable LLMs to interact with external services, APIs, and databases. Tool calling can also be used to extract structured information from unstructured data and perform various other tasks.
-* [Structured output](/docs/concepts/structured_outputs): A technique to make a chat model respond in a structured format, such as JSON that matches a given schema.
-* [Multimodality](/docs/concepts/multimodality): The ability to work with data other than text; for example, images, audio, and video.
-
-## Features
-
-LangChain provides a consistent interface for working with chat models from different providers while offering additional features for monitoring, debugging, and optimizing the performance of applications that use LLMs.
-
-* Integrations with many chat model providers (e.g., Anthropic, OpenAI, Ollama, Microsoft Azure, Google Vertex, Amazon Bedrock, Hugging Face, Cohere, Groq). Please see [chat model integrations](/docs/integrations/chat/) for an up-to-date list of supported models.
-* Use either LangChain's [messages](/docs/concepts/messages) format or OpenAI format.
-* Standard [tool calling API](/docs/concepts/tool_calling): standard interface for binding tools to models, accessing tool call requests made by models, and sending tool results back to the model.
-* Standard API for [structuring outputs](/docs/concepts/structured_outputs/#structured-output-method) via the `with_structured_output` method.
-* Provides support for [async programming](/docs/concepts/async), [efficient batching](/docs/concepts/runnables/#optimized-parallel-execution-batch), [a rich streaming API](/docs/concepts/streaming).
-* Integration with [LangSmith](https://docs.smith.langchain.com) for monitoring and debugging production-grade applications based on LLMs.
-* Additional features like standardized [token usage](/docs/concepts/messages/#aimessage), [rate limiting](#rate-limiting), [caching](#caching) and more.
-
-## Integrations
-
-LangChain has many chat model integrations that allow you to use a wide variety of models from different providers.
-
-These integrations are one of two types:
-
-1. **Official models**: These are models that are officially supported by LangChain and/or the model provider. You can find these models in the `langchain-<provider>` packages.
-2. **Community models**: These are models that are mostly contributed and supported by the community. You can find these models in the `langchain-community` package.
-
-LangChain chat models are named with a convention that prefixes "Chat" to their class names (e.g., `ChatOllama`, `ChatAnthropic`, `ChatOpenAI`, etc.).
-
-Please review the [chat model integrations](/docs/integrations/chat/) for a list of supported models.
-
-:::note
-Models that do **not** include the prefix "Chat" in their name or include "LLM" as a suffix in their name typically refer to older models that do not follow the chat model interface and instead use an interface that takes a string as input and returns a string as output.
-:::
-
-
-## Interface
-
-LangChain chat models implement the [BaseChatModel](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface. Because `BaseChatModel` also implements the [Runnable Interface](/docs/concepts/runnables), chat models support a [standard streaming interface](/docs/concepts/streaming), [async programming](/docs/concepts/async), optimized [batching](/docs/concepts/runnables/#optimized-parallel-execution-batch), and more. Please see the [Runnable Interface](/docs/concepts/runnables) for more details.
-
-Many of the key methods of chat models operate on [messages](/docs/concepts/messages) as input and return messages as output.
-
-Chat models offer a standard set of parameters that can be used to configure the model. These parameters are typically used to control the behavior of the model, such as the temperature of the output, the maximum number of tokens in the response, and the maximum time to wait for a response. Please see the [standard parameters](#standard-parameters) section for more details.
-
-:::note
-In documentation, we will often use the terms "LLM" and "Chat Model" interchangeably. This is because most modern LLMs are exposed to users via a chat model interface.
-
-However, LangChain also has implementations of older LLMs that do not follow the chat model interface and instead use an interface that takes a string as input and returns a string as output. These models are typically named without the "Chat" prefix (e.g., `Ollama`, `Anthropic`, `OpenAI`, etc.).
-These models implement the [BaseLLM](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.llms.BaseLLM.html#langchain_core.language_models.llms.BaseLLM) interface and may be named with the "LLM" suffix (e.g., `OllamaLLM`, `AnthropicLLM`, `OpenAILLM`, etc.). Generally, users should not use these models.
-:::
-
-### Key methods
-
-The key methods of a chat model are:
-
-1. **invoke**: The primary method for interacting with a chat model. It takes a list of [messages](/docs/concepts/messages) as input and returns a message as output.
-2. **stream**: A method that allows you to stream the output of a chat model as it is generated.
-3. **batch**: A method that allows you to batch multiple requests to a chat model together for more efficient processing.
-4. **bind_tools**: A method that allows you to bind a tool to a chat model for use in the model's execution context.
-5. **with_structured_output**: A wrapper around the `invoke` method for models that natively support [structured output](/docs/concepts/structured_outputs).
-
-Other important methods can be found in the [BaseChatModel API Reference](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html).
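-
-A minimal sketch of the first three methods, assuming the `langchain-openai` integration (any chat model integration exposes the same interface; the model name is illustrative):
-
-```python
-from langchain_openai import ChatOpenAI  # assumed provider
-
-model = ChatOpenAI(model="gpt-4o-mini")
-
-# invoke: one request, one response message
-response = model.invoke("Tell me a joke about bears.")
-print(response.content)
-
-# stream: iterate over message chunks as they are generated
-for chunk in model.stream("Tell me a joke about bears."):
-    print(chunk.content, end="", flush=True)
-
-# batch: process several independent inputs, potentially in parallel
-responses = model.batch(["Hello!", "What is 2 + 2?"])
-```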
-
-### Inputs and outputs
-
-Modern LLMs are typically accessed through a chat model interface that takes [messages](/docs/concepts/messages) as input and returns [messages](/docs/concepts/messages) as output. Messages are typically associated with a role (e.g., "system", "human", "assistant") and one or more content blocks that contain text or potentially multimodal data (e.g., images, audio, video).
-
-LangChain supports two message formats to interact with chat models:
-
-1. **LangChain Message Format**: LangChain's own message format, which is used by default and is used internally by LangChain.
-2. **OpenAI's Message Format**: OpenAI's message format.
-
-### Standard parameters
-
-Many chat models have standardized parameters that can be used to configure the model:
-
-| Parameter      | Description                                                                                                                                                                                                                                                                                                    |
-|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `model`        | The name or identifier of the specific AI model you want to use (e.g., `"gpt-3.5-turbo"` or `"gpt-4"`).                                                                                                                                                                                                        |
-| `temperature`  | Controls the randomness of the model's output. A higher value (e.g., 1.0) makes responses more creative, while a lower value (e.g., 0.0) makes them more deterministic and focused.                                                                                                                            |
-| `timeout`      | The maximum time (in seconds) to wait for a response from the model before canceling the request. Ensures the request doesn’t hang indefinitely.                                                                                                                                                               |
-| `max_tokens`   | Limits the total number of tokens (words and punctuation) in the response. This controls how long the output can be.                                                                                                                                                                                           |
-| `stop`         | Specifies stop sequences that indicate when the model should stop generating tokens. For example, you might use specific strings to signal the end of a response.                                                                                                                                              |
-| `max_retries`  | The maximum number of attempts the system will make to resend a request if it fails due to issues like network timeouts or rate limits.                                                                                                                                                                        |
-| `api_key`      | The API key required for authenticating with the model provider. This is usually issued when you sign up for access to the model.                                                                                                                                                                              |
-| `base_url`     | The URL of the API endpoint where requests are sent. This is typically provided by the model's provider and is necessary for directing your requests.                                                                                                                                                          |
-| `rate_limiter` | An optional [BaseRateLimiter](https://python.langchain.com/api_reference/core/rate_limiters/langchain_core.rate_limiters.BaseRateLimiter.html#langchain_core.rate_limiters.BaseRateLimiter) to space out requests to avoid exceeding rate limits.  See [rate-limiting](#rate-limiting) below for more details. |
-
-Some important things to note:
-
-- Standard parameters only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so `max_tokens` can't be supported on these.
-- Standard parameters are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in `langchain-community`.
-
-Chat models also accept other parameters that are specific to that integration. To find all the parameters supported by a chat model, head to the respective [API reference](https://python.langchain.com/api_reference/) for that model.
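-
-Most of the standard parameters can be passed directly at initialization. A brief sketch, assuming the `langchain-openai` integration (values are illustrative):
-
-```python
-from langchain_openai import ChatOpenAI  # assumed provider
-
-model = ChatOpenAI(
-    model="gpt-4o-mini",   # model identifier
-    temperature=0.0,       # more deterministic output
-    max_tokens=256,        # cap on response length
-    timeout=30,            # seconds to wait before cancelling the request
-    max_retries=2,         # retries on transient failures such as rate limits
-)
-```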
-
-## Tool calling
-
-Chat models can call [tools](/docs/concepts/tools) to perform tasks such as fetching data from a database, making API requests, or running custom code. Please
-see the [tool calling](/docs/concepts/tool_calling) guide for more information.
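-
-As a brief illustration, tools are bound to a model with `bind_tools`, and any tool calls the model requests appear on the returned message (a sketch, assuming a tool-calling capable model from the `langchain-openai` integration; the tool is illustrative):
-
-```python
-from langchain_core.tools import tool
-from langchain_openai import ChatOpenAI  # assumed provider
-
-@tool
-def get_weather(city: str) -> str:
-    """Return the current weather for a city."""
-    return f"It is sunny in {city}."
-
-model = ChatOpenAI(model="gpt-4o-mini")
-model_with_tools = model.bind_tools([get_weather])
-
-ai_msg = model_with_tools.invoke("What's the weather in Paris?")
-print(ai_msg.tool_calls)  # e.g. [{"name": "get_weather", "args": {"city": "Paris"}, ...}]
-```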
-
-## Structured outputs
-
-Chat models can be requested to respond in a particular format (e.g., JSON or matching a particular schema). This feature is extremely
-useful for information extraction tasks. Please read more about
-the technique in the [structured outputs](/docs/concepts/structured_outputs) guide.
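-
-A minimal sketch using `with_structured_output` with a Pydantic schema (the schema and provider are illustrative):
-
-```python
-from pydantic import BaseModel, Field
-from langchain_openai import ChatOpenAI  # assumed provider
-
-class Joke(BaseModel):
-    setup: str = Field(description="The setup of the joke")
-    punchline: str = Field(description="The punchline of the joke")
-
-model = ChatOpenAI(model="gpt-4o-mini")
-structured_model = model.with_structured_output(Joke)
-
-joke = structured_model.invoke("Tell me a joke about cats")  # returns a Joke instance
-```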
-
-## Multimodality
-
-Large Language Models (LLMs) are not limited to processing text. They can also be used to process other types of data, such as images, audio, and video. This is known as [multimodality](/docs/concepts/multimodality).
-
-Currently, only some LLMs support multimodal inputs, and almost none support multimodal outputs. Please consult the specific model documentation for details.
-
-## Context window
-
-A chat model's context window refers to the maximum size of the input sequence the model can process at one time. While the context windows of modern LLMs are quite large, they still present a limitation that developers must keep in mind when working with chat models.
-
-If the input exceeds the context window, the model may not be able to process the entire input and could raise an error. In conversational applications, this is especially important because the context window determines how much information the model can "remember" throughout a conversation. Developers often need to manage the input within the context window to maintain a coherent dialogue without exceeding the limit. For more details on handling memory in conversations, refer to the [memory](https://langchain-ai.github.io/langgraph/concepts/memory/) guide.
-
-The size of the input is measured in [tokens](/docs/concepts/tokens) which are the unit of processing that the model uses.
-
-## Advanced topics
- 
-### Rate-limiting
-
-Many chat model providers impose a limit on the number of requests that can be made in a given time period.
-
-If you hit a rate limit, you will typically receive a rate limit error response from the provider, and will need to wait before making more requests.
-
-You have a few options to deal with rate limits:
-
-1. Try to avoid hitting rate limits by spacing out requests: Chat models accept a `rate_limiter` parameter that can be provided during initialization (a minimal sketch follows this list). This parameter is used to control the rate at which requests are made to the model provider. Spacing out the requests to a given model is a particularly useful strategy when benchmarking models to evaluate their performance. Please see the [how to handle rate limits](/docs/how_to/chat_model_rate_limiting/) guide for more information on how to use this feature.
-2. Try to recover from rate limit errors: If you receive a rate limit error, you can wait a certain amount of time before retrying the request. The amount of time to wait can be increased with each subsequent rate limit error. Chat models have a `max_retries` parameter that can be used to control the number of retries. See the [standard parameters](#standard-parameters) section for more information.
-3. Fall back to another chat model: If you hit a rate limit with one chat model, you can switch to another chat model that is not rate-limited.
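-
-A minimal sketch of option 1, using the in-memory rate limiter from `langchain-core` (the provider and limits are illustrative):
-
-```python
-from langchain_core.rate_limiters import InMemoryRateLimiter
-from langchain_openai import ChatOpenAI  # assumed provider
-
-rate_limiter = InMemoryRateLimiter(
-    requests_per_second=0.1,     # at most one request every 10 seconds
-    check_every_n_seconds=0.1,   # how often to check whether a request may proceed
-    max_bucket_size=10,          # maximum burst size
-)
-
-model = ChatOpenAI(model="gpt-4o-mini", rate_limiter=rate_limiter)
-```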
-
-### Caching
-
-Chat model APIs can be slow, so a natural question is whether to cache the results of previous conversations. Theoretically, caching can help improve performance by reducing the number of requests made to the model provider. In practice, caching chat model responses is a complex problem and should be approached with caution.
-
-The reason is that getting a cache hit is unlikely after the first or second interaction in a conversation if relying on caching the **exact** inputs into the model. For example, how likely is it that multiple conversations start with the exact same message? What about the exact same three messages?
-
-An alternative approach is to use semantic caching, where you cache responses based on the meaning of the input rather than the exact input itself. This can be effective in some situations, but not in others.
-
-A semantic cache introduces a dependency on another model on the critical path of your application (e.g., the semantic cache may rely on an [embedding model](/docs/concepts/embedding_models) to convert text to a vector representation), and it's not guaranteed to capture the meaning of the input accurately.
-
-However, there might be situations where caching chat model responses is beneficial. For example, if you have a chat model that is used to answer frequently asked questions, caching responses can help reduce the load on the model provider, lower costs, and improve response times.
-
-Please see the [how to cache chat model responses](/docs/how_to/chat_model_caching/) guide for more details.
-
-## Related resources
-
-* How-to guides on using chat models: [how-to guides](/docs/how_to/#chat-models).
-* List of supported chat models: [chat model integrations](/docs/integrations/chat/).
-
-### Conceptual guides
-
-* [Messages](/docs/concepts/messages)
-* [Tool calling](/docs/concepts/tool_calling)
-* [Multimodality](/docs/concepts/multimodality)
-* [Structured outputs](/docs/concepts/structured_outputs)
-* [Tokens](/docs/concepts/tokens)
diff --git a/langchain_md_files/concepts/document_loaders.mdx b/langchain_md_files/concepts/document_loaders.mdx
deleted file mode 100644
index c38e81610e35d64be77e0f18a420ddfe7352b31a..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/document_loaders.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
-# Document loaders
-<span data-heading-keywords="document loader,document loaders"></span>
-
-:::info[Prerequisites]
-
-* [Document loaders API reference](/docs/how_to/#document-loaders)
-:::
-
-Document loaders are designed to load document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
-
-## Integrations
-
-You can find available integrations on the [Document loaders integrations page](/docs/integrations/document_loaders/).
-
-## Interface
-
-Document loaders implement the [BaseLoader interface](https://python.langchain.com/api_reference/core/document_loaders/langchain_core.document_loaders.base.BaseLoader.html).
-
-Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` or `.lazy_load` method.
-
-Here's a simple example:
-
-```python
-from langchain_community.document_loaders.csv_loader import CSVLoader
-
-loader = CSVLoader(
-    ...  # <-- Integration specific parameters here
-)
-data = loader.load()
-```
-
-When working with large datasets, you can use the `.lazy_load` method:
-
-```python
-for document in loader.lazy_load():
-    print(document)
-```
-
-## Related resources
-
-Please see the following resources for more information:
-
-* [How-to guides for document loaders](/docs/how_to/#document-loaders)
-* [Document API reference](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html)
-* [Document loaders integrations](/docs/integrations/document_loaders/)
diff --git a/langchain_md_files/concepts/embedding_models.mdx b/langchain_md_files/concepts/embedding_models.mdx
deleted file mode 100644
index a91018036c0af838301bd7e21782592c6e070cc5..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/embedding_models.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
-# Embedding models
-<span data-heading-keywords="embedding,embeddings"></span>
-
-:::info[Prerequisites]
-
-* [Documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html)
-
-:::
-
-:::info[Note]
-This conceptual overview focuses on text-based embedding models.
-
-Embedding models can also be [multimodal](/docs/concepts/multimodality) though such models are not currently supported by LangChain.
-:::
-
-Imagine being able to capture the essence of any text - a tweet, document, or book - in a single, compact representation.
-This is the power of embedding models, which lie at the heart of many retrieval systems.
-Embedding models transform human language into a format that machines can understand and compare with speed and accuracy. 
-These models take text as input and produce a fixed-length array of numbers, a numerical fingerprint of the text's semantic meaning.
-Embeddings allow search systems to find relevant documents not just based on keyword matches, but on semantic understanding.
-
-## Key concepts
-
-![Conceptual Overview](/img/embeddings_concept.png)
-
-(1) **Embed text as a vector**: Embeddings transform text into a numerical vector representation.
-
-(2) **Measure similarity**: Embedding vectors can be compared using simple mathematical operations.
-
-## Embedding 
-
-### Historical context 
-
-The landscape of embedding models has evolved significantly over the years. 
-A pivotal moment came in 2018 when Google introduced [BERT (Bidirectional Encoder Representations from Transformers)](https://www.nvidia.com/en-us/glossary/bert/). 
-BERT applied transformer models to embed text as a simple vector representation, which led to unprecedented performance across various NLP tasks.
-However, BERT wasn't optimized for generating sentence embeddings efficiently. 
-This limitation spurred the creation of [SBERT (Sentence-BERT)](https://www.sbert.net/examples/training/sts/README.html), which adapted the BERT architecture to generate semantically rich sentence embeddings that are easily comparable via similarity metrics like cosine similarity, dramatically reducing the computational overhead for tasks like finding similar sentences.
-Today, the embedding model ecosystem is diverse, with numerous providers offering their own implementations. 
-To navigate this variety, researchers and practitioners often turn to benchmarks like the [Massive Text Embedding Benchmark (MTEB)](https://huggingface.co/blog/mteb) for objective comparisons.
-
-:::info[Further reading]
-
-* See the [seminal BERT paper](https://arxiv.org/abs/1810.04805).
-* See Cameron Wolfe's [excellent review](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2) of embedding models.
-* See the [Massive Text Embedding Benchmark (MTEB)](https://huggingface.co/blog/mteb) leaderboard for a comprehensive overview of embedding models.
-
-:::
-
-### Interface
-
-LangChain provides a universal interface for working with embedding models, with standard methods for common operations.
-This common interface simplifies interaction with various embedding providers through two central methods:
-
-- `embed_documents`: For embedding multiple texts (documents)
-- `embed_query`: For embedding a single text (query)
-
-This distinction is important, as some providers employ different embedding strategies for documents (which are to be searched) versus queries (the search input itself).
-To illustrate, here's a practical example using LangChain's `.embed_documents` method to embed a list of strings:
-
-```python
-from langchain_openai import OpenAIEmbeddings
-embeddings_model = OpenAIEmbeddings()
-embeddings = embeddings_model.embed_documents(
-    [
-        "Hi there!",
-        "Oh, hello!",
-        "What's your name?",
-        "My friends call me World",
-        "Hello World!"
-    ]
-)
-len(embeddings), len(embeddings[0])
-# (5, 1536)
-```
-
-For convenience, you can also use the `embed_query` method to embed a single text:
-
-```python
-query_embedding = embeddings_model.embed_query("What is the meaning of life?")
-```
-
-:::info[Further reading]
-
-* See the full list of [LangChain embedding model integrations](/docs/integrations/text_embedding/).
-* See these [how-to guides](/docs/how_to/embed_text) for working with embedding models.
-
-:::
-
-### Integrations
-
-LangChain offers many embedding model integrations, which you can find on the [embedding models integrations](/docs/integrations/text_embedding/) page.
-
-## Measure similarity
-
-Each embedding is essentially a set of coordinates, often in a high-dimensional space. 
-In this space, the position of each point (embedding) reflects the meaning of its corresponding text.
-Just as similar words might be close to each other in a thesaurus, similar concepts end up close to each other in this embedding space. 
-This allows for intuitive comparisons between different pieces of text.
-By reducing text to these numerical representations, we can use simple mathematical operations to quickly measure how alike two pieces of text are, regardless of their original length or structure.
-Some common similarity metrics include:
-
-- **Cosine Similarity**: Measures the cosine of the angle between two vectors.
-- **Euclidean Distance**: Measures the straight-line distance between two points.
-- **Dot Product**: Measures the projection of one vector onto another.
-
-The similarity metric should be chosen based on the model.
-As an example, [OpenAI suggests cosine similarity for their embeddings](https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use), which can be easily implemented:
-
-```python
-import numpy as np
-
-def cosine_similarity(vec1, vec2):
-    dot_product = np.dot(vec1, vec2)
-    norm_vec1 = np.linalg.norm(vec1)
-    norm_vec2 = np.linalg.norm(vec2)
-    return dot_product / (norm_vec1 * norm_vec2)
-
-# e.g., compare the query embedding with the first document embedding from the examples above
-similarity = cosine_similarity(query_embedding, embeddings[0])
-print("Cosine Similarity:", similarity)
-```  
-
-:::info[Further reading]
-
-* See Simon Willison’s [nice blog post and video](https://simonwillison.net/2023/Oct/23/embeddings/) on embeddings and similarity metrics.
-* See [this documentation](https://developers.google.com/machine-learning/clustering/dnn-clustering/supervised-similarity) from Google on similarity metrics to consider with embeddings.
-* See Pinecone's [blog post](https://www.pinecone.io/learn/vector-similarity/) on similarity metrics.
-* See OpenAI's [FAQ](https://platform.openai.com/docs/guides/embeddings/faq) on what similarity metric to use with OpenAI embeddings.
-
-::: 
diff --git a/langchain_md_files/concepts/evaluation.mdx b/langchain_md_files/concepts/evaluation.mdx
deleted file mode 100644
index 274ef98367cbd1cc95360a4d96983df32049642e..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/evaluation.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Evaluation
-<span data-heading-keywords="evaluation,evaluate"></span>
-
-Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications.
-It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose.
-This process is vital for building reliable applications.
-
-![](/img/langsmith_evaluate.png)
-
-[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways:
-
-- It makes it easier to create and curate datasets via its tracing and annotation features
-- It provides an evaluation framework that helps you define metrics and run your app against your dataset
-- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/CD
-
-To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
-
diff --git a/langchain_md_files/concepts/example_selectors.mdx b/langchain_md_files/concepts/example_selectors.mdx
deleted file mode 100644
index 32dad8c5fa4437c3e3509c6bfc14459c7691fae9..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/example_selectors.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Example selectors
-
-:::note Prerequisites
-
-- [Chat models](/docs/concepts/chat_models/)
-- [Few-shot prompting](/docs/concepts/few_shot_prompting/)
-:::
-
-## Overview
-
-One common prompting technique for achieving better performance is to include examples as part of the prompt. This is known as [few-shot prompting](/docs/concepts/few_shot_prompting).
-
-This gives the [language model](/docs/concepts/chat_models/) concrete examples of how it should behave.
-Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
-
-**Example Selectors** are classes responsible for selecting and then formatting examples into prompts.
-
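-A common approach is semantic selection, where the examples closest in meaning to the current input are chosen. A minimal sketch, assuming the `langchain-openai` and `langchain-chroma` packages (the example data is illustrative):
-
-```python
-from langchain_core.example_selectors import SemanticSimilarityExampleSelector
-from langchain_chroma import Chroma            # assumed vector store
-from langchain_openai import OpenAIEmbeddings  # assumed embedding model
-
-examples = [
-    {"input": "happy", "output": "sad"},
-    {"input": "tall", "output": "short"},
-    {"input": "energetic", "output": "lethargic"},
-]
-
-selector = SemanticSimilarityExampleSelector.from_examples(
-    examples,
-    OpenAIEmbeddings(),
-    Chroma,   # vector store class used to index the examples
-    k=1,      # number of examples to select
-)
-
-selector.select_examples({"input": "cheerful"})  # likely [{"input": "happy", "output": "sad"}]
-```
-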
-## Related resources
-
-* [Example selector how-to guides](/docs/how_to/#example-selectors)
\ No newline at end of file
diff --git a/langchain_md_files/concepts/few_shot_prompting.mdx b/langchain_md_files/concepts/few_shot_prompting.mdx
deleted file mode 100644
index ad11df482554d3144c85bb9afda5efc88231ad92..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/few_shot_prompting.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
-# Few-shot prompting
-
-:::note Prerequisites
-
-- [Chat models](/docs/concepts/chat_models/)
-:::
-
-## Overview
-
-One of the most effective ways to improve model performance is to give a model examples of
-what you want it to do. The technique of adding example inputs and expected outputs
-to a model prompt is known as "few-shot prompting". The technique is based on the
-[Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) paper.
-There are a few things to think about when doing few-shot prompting:
-
-1. How are examples generated?
-2. How many examples are in each prompt?
-3. How are examples selected at runtime?
-4. How are examples formatted in the prompt?
-
-Here are the considerations for each.
-
-## 1. Generating examples
-
-The first and most important step of few-shot prompting is coming up with a good dataset of examples. Good examples should be relevant at runtime, clear, informative, and provide information that was not already known to the model.
-
-At a high level, the basic ways to generate examples are:
-- Manual: a person/people generates examples they think are useful.
-- Better model: a better (presumably more expensive/slower) model's responses are used as examples for a worse (presumably cheaper/faster) model.
-- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples).
-- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
-
-Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable to hand-craft a few really good examples.
-For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
-
-**Single-turn vs. multi-turn examples**
-
-Another dimension to think about when generating examples is what the example is actually showing.
-
-The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
-
-A more complex type of example is an entire conversation, usually one in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
-This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
-
-## 2. Number of examples
-
-Once we have a dataset of examples, we need to think about how many examples should be in each prompt.
-The key tradeoff is that more examples generally improve performance, but larger prompts increase costs and latency.
-And beyond some threshold having too many examples can start to confuse the model.
-Finding the right number of examples is highly dependent on the model, the task, the quality of the examples, and your cost and latency constraints.
-Anecdotally, the better the model is the fewer examples it needs to perform well and the more quickly you hit steeply diminishing returns on adding more examples.
-But the best (and really the only) way to reliably answer this question is to run experiments with different numbers of examples.
-
-## 3. Selecting examples
-
-Assuming we are not adding our entire example dataset into each prompt, we need to have a way of selecting examples from our dataset based on a given input. We can do this:
-- Randomly
-- By (semantic or keyword-based) similarity of the inputs
-- Based on some other constraints, like token size
-
-LangChain has a number of [`ExampleSelectors`](/docs/concepts/example_selectors) which make it easy to use any of these techniques.
-
-Generally, selecting by semantic similarity leads to the best model performance. But how much this matters is, again, model- and task-specific, and is something worth experimenting with.
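-
-For instance, a minimal sketch of semantic-similarity selection with the built-in selector might look like the following (the example data and the use of `OpenAIEmbeddings` are assumptions; any embedding model and vector store should work):
-
-```python
-from langchain_core.example_selectors import SemanticSimilarityExampleSelector
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import OpenAIEmbeddings  # assumption: any embedding model works
-
-examples = [
-    {"input": "2 + 2", "output": "4"},
-    {"input": "What is the capital of France?", "output": "Paris"},
-]
-
-selector = SemanticSimilarityExampleSelector.from_examples(
-    examples,
-    OpenAIEmbeddings(),
-    InMemoryVectorStore,
-    k=1,
-)
-
-# Returns the example(s) most semantically similar to the runtime input.
-selector.select_examples({"input": "3 + 3"})
-```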
-
-## 4. Formatting examples
-
-Most state-of-the-art models these days are chat models, so we'll focus on formatting examples for those. Our basic options are to insert the examples:
-- In the system prompt as a string
-- As their own messages
-
-If we insert our examples into the system prompt as a string, we'll need to make sure it's clear to the model where each example begins and which parts are the input versus output. Different models respond better to different syntaxes, like [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chat-markup-language), XML, TypeScript, etc.
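-
-As a rough sketch (the tag names and `examples` structure are illustrative, not a required format), examples can be serialized into the system prompt with explicit delimiters:
-
-```python
-examples = [
-    {"input": "2 + 2", "output": "4"},
-    {"input": "2 + 3", "output": "5"},
-]
-
-# Wrap each example in explicit tags so the model can tell inputs, outputs,
-# and example boundaries apart.
-formatted = "\n".join(
-    f"<example>\n  <input>{ex['input']}</input>\n  <output>{ex['output']}</output>\n</example>"
-    for ex in examples
-)
-
-system_prompt = f"Answer the user's question.\n\nHere are some examples:\n{formatted}"
-```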
-
-If we insert our examples as messages, where each example is represented as a sequence of Human and AI messages, we might also want to assign [names](/docs/concepts/messages) to our messages like `"example_user"` and `"example_assistant"` to make it clear that these messages correspond to different actors than the latest input message.
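-
-A minimal sketch of this approach (assuming `model` is any chat model):
-
-```python
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
-
-messages = [
-    SystemMessage("Answer the user's question."),
-    # Few-shot examples, named so the model can tell them apart from the live input.
-    HumanMessage("2 + 2", name="example_user"),
-    AIMessage("4", name="example_assistant"),
-    # The actual input message.
-    HumanMessage("3 + 3"),
-]
-
-response = model.invoke(messages)
-```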
-
-**Formatting tool call examples**
-
-One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
-- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call,
-- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage,
-- Some models require that tools are passed into the model if there are any tool calls / ToolMessages in the chat history.
-
-These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints.
-In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models.
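-
-For example, a tool-call example padded with a dummy tool result might be sketched as follows (the `multiply` tool and its arguments are made up for illustration):
-
-```python
-from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
-
-example_messages = [
-    HumanMessage("What is 2 multiplied by 3?", name="example_user"),
-    AIMessage(
-        "",
-        name="example_assistant",
-        tool_calls=[{"name": "multiply", "args": {"a": 2, "b": 3}, "id": "call_1", "type": "tool_call"}],
-    ),
-    # Dummy tool output so providers that require a ToolMessage after every
-    # tool call will accept the example.
-    ToolMessage("6", tool_call_id="call_1"),
-    AIMessage("2 multiplied by 3 is 6.", name="example_assistant"),
-]
-```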
-
-You can see a case study of how Anthropic and OpenAI respond to different few-shot prompting techniques on two different tool calling benchmarks [here](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).
diff --git a/langchain_md_files/concepts/index.mdx b/langchain_md_files/concepts/index.mdx
deleted file mode 100644
index e7b86f41e4e0f772cc6913f8f9175130fbdfdc00..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/index.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
-# Conceptual guide
-
-This guide provides explanations of the key concepts behind the LangChain framework and AI applications more broadly.
-
-We recommend that you go through at least one of the [Tutorials](/docs/tutorials) before diving into the conceptual guide. This will provide practical context that will make it easier to understand the concepts discussed here.
-
-The conceptual guide does not cover step-by-step instructions or specific implementation examples — those are found in the [How-to guides](/docs/how_to/) and [Tutorials](/docs/tutorials). For detailed reference material, please see the [API reference](https://python.langchain.com/api_reference/).
-
-## High level
-
-- **[Why LangChain?](/docs/concepts/why_langchain)**: Overview of the value that LangChain provides.
-- **[Architecture](/docs/concepts/architecture)**: How packages are organized in the LangChain ecosystem.
-
-## Concepts
-
-- **[Chat models](/docs/concepts/chat_models)**: LLMs exposed via a chat API that process sequences of messages as input and output a message.
-- **[Messages](/docs/concepts/messages)**: The unit of communication in chat models, used to represent model input and output.
-- **[Chat history](/docs/concepts/chat_history)**: A conversation represented as a sequence of messages, alternating between user messages and model responses.
-- **[Tools](/docs/concepts/tools)**: A function with an associated schema defining the function's name, description, and the arguments it accepts.
-- **[Tool calling](/docs/concepts/tool_calling)**: A type of chat model API that accepts tool schemas, along with messages, as input and returns invocations of those tools as part of the output message.
-- **[Structured output](/docs/concepts/structured_outputs)**: A technique to make a chat model respond in a structured format, such as JSON that matches a given schema.
-- **[Memory](https://langchain-ai.github.io/langgraph/concepts/memory/)**: Information about a conversation that is persisted so that it can be used in future conversations.
-- **[Multimodality](/docs/concepts/multimodality)**: The ability to work with data that comes in different forms, such as text, audio, images, and video.
-- **[Runnable interface](/docs/concepts/runnables)**: The base abstraction that many LangChain components and the LangChain Expression Language are built on.
-- **[Streaming](/docs/concepts/streaming)**: LangChain streaming APIs for surfacing results as they are generated.
-- **[LangChain Expression Language (LCEL)](/docs/concepts/lcel)**: A syntax for orchestrating LangChain components. Most useful for simpler applications.
-- **[Document loaders](/docs/concepts/document_loaders)**: Load a source as a list of documents.
-- **[Retrieval](/docs/concepts/retrieval)**: Information retrieval systems can retrieve structured or unstructured data from a datasource in response to a query.
-- **[Text splitters](/docs/concepts/text_splitters)**: Split long text into smaller chunks that can be individually indexed to enable granular retrieval.
-- **[Embedding models](/docs/concepts/embedding_models)**: Models that represent data such as text or images in a vector space.
-- **[Vector stores](/docs/concepts/vectorstores)**: Storage of and efficient search over vectors and associated metadata.
-- **[Retriever](/docs/concepts/retrievers)**: A component that returns relevant documents from a knowledge base in response to a query.
-- **[Retrieval Augmented Generation (RAG)](/docs/concepts/rag)**: A technique that enhances language models by combining them with external knowledge bases.
-- **[Agents](/docs/concepts/agents)**: Use a [language model](/docs/concepts/chat_models) to choose a sequence of actions to take. Agents can interact with external resources via [tools](/docs/concepts/tools).
-- **[Prompt templates](/docs/concepts/prompt_templates)**: Component for factoring out the static parts of a model "prompt" (usually a sequence of messages). Useful for serializing, versioning, and reusing these static parts.
-- **[Output parsers](/docs/concepts/output_parsers)**: Responsible for taking the output of a model and transforming it into a more suitable format for downstream tasks. Output parsers were primarily useful prior to the general availability of [tool calling](/docs/concepts/tool_calling) and [structured outputs](/docs/concepts/structured_outputs).
-- **[Few-shot prompting](/docs/concepts/few_shot_prompting)**: A technique for improving model performance by providing a few examples of the task to perform in the prompt.
-- **[Example selectors](/docs/concepts/example_selectors)**: Used to select the most relevant examples from a dataset based on a given input. Example selectors are used in few-shot prompting to select examples for a prompt.
-- **[Async programming](/docs/concepts/async)**: The basics that one should know to use LangChain in an asynchronous context.
-- **[Callbacks](/docs/concepts/callbacks)**: Callbacks enable the execution of custom auxiliary code in built-in components. Callbacks are used to stream outputs from LLMs in LangChain, trace the intermediate steps of an application, and more.
-- **[Tracing](/docs/concepts/tracing)**: The process of recording the steps that an application takes to go from input to output. Tracing is essential for debugging and diagnosing issues in complex applications.
-- **[Evaluation](/docs/concepts/evaluation)**: The process of assessing the performance and effectiveness of AI applications. This involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose. This process is vital for building reliable applications.
-- **[Testing](/docs/concepts/testing)**: The process of verifying that a component of an integration or application works as expected. Testing is essential for ensuring that the application behaves correctly and that changes to the codebase do not introduce new bugs.
-
-## Glossary
-
-- **[AIMessageChunk](/docs/concepts/messages#aimessagechunk)**: A partial response from an AI message. Used when streaming responses from a chat model.
-- **[AIMessage](/docs/concepts/messages#aimessage)**: Represents a complete response from an AI model.
-- **[astream_events](/docs/concepts/chat_models#key-methods)**: Stream granular information from [LCEL](/docs/concepts/lcel) chains.
-- **[BaseTool](/docs/concepts/tools/#tool-interface)**: The base class for all tools in LangChain.
-- **[batch](/docs/concepts/runnables)**: Use to execute a runnable with batch inputs.
-- **[bind_tools](/docs/concepts/tool_calling/#tool-binding)**: Allows models to interact with tools.
-- **[Caching](/docs/concepts/chat_models#caching)**: Storing results to avoid redundant calls to a chat model.
-- **[Chat models](/docs/concepts/multimodality/#multimodality-in-chat-models)**: Chat models that handle multiple data modalities.
-- **[Configurable runnables](/docs/concepts/runnables/#configurable-runnables)**: Creating configurable Runnables.
-- **[Context window](/docs/concepts/chat_models#context-window)**: The maximum size of input a chat model can process.
-- **[Conversation patterns](/docs/concepts/chat_history#conversation-patterns)**: Common patterns in chat interactions.
-- **[Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html)**: LangChain's representation of a document.
-- **[Embedding models](/docs/concepts/multimodality/#multimodality-in-embedding-models)**: Models that generate vector embeddings for various data types.
-- **[HumanMessage](/docs/concepts/messages#humanmessage)**: Represents a message from a human user.
-- **[InjectedState](/docs/concepts/tools#injectedstate)**: A state injected into a tool function.
-- **[InjectedStore](/docs/concepts/tools#injectedstore)**: A store that can be injected into a tool for data persistence.
-- **[InjectedToolArg](/docs/concepts/tools#injectedtoolarg)**: Mechanism to inject arguments into tool functions.
-- **[input and output types](/docs/concepts/runnables#input-and-output-types)**: Types used for input and output in Runnables.
-- **[Integration packages](/docs/concepts/architecture/#integration-packages)**: Third-party packages that integrate with LangChain.
-- **[Integration tests](/docs/concepts/testing#integration-tests)**: Tests that verify the correctness of the interaction between components, usually run with access to the underlying API that powers an integration.
-- **[invoke](/docs/concepts/runnables)**: A standard method to invoke a Runnable.
-- **[JSON mode](/docs/concepts/structured_outputs#json-mode)**: Returning responses in JSON format.
-- **[langchain-community](/docs/concepts/architecture#langchain-community)**: Community-driven components for LangChain.
-- **[langchain-core](/docs/concepts/architecture#langchain-core)**: Core langchain package. Includes base interfaces and in-memory implementations.
-- **[langchain](/docs/concepts/architecture#langchain)**: A package for higher level components (e.g., some pre-built chains).
-- **[langgraph](/docs/concepts/architecture#langgraph)**: Powerful orchestration layer for LangChain. Use to build complex pipelines and workflows.
-- **[langserve](/docs/concepts/architecture#langserve)**: Used to deploy LangChain Runnables as REST endpoints. Uses FastAPI. Works primarily for LangChain Runnables; it does not currently integrate with LangGraph.
-- **[LLMs (legacy)](/docs/concepts/text_llms)**: Older language models that take a string as input and return a string as output.
-- **[Managing chat history](/docs/concepts/chat_history#managing-chat-history)**: Techniques to maintain and manage the chat history.
-- **[OpenAI format](/docs/concepts/messages#openai-format)**: OpenAI's message format for chat models.
-- **[Propagation of RunnableConfig](/docs/concepts/runnables/#propagation-of-runnableconfig)**: Propagating configuration through Runnables. Read this if you are working with Python 3.9 or 3.10 and async code.
-- **[rate-limiting](/docs/concepts/chat_models#rate-limiting)**: Client side rate limiting for chat models.
-- **[RemoveMessage](/docs/concepts/messages/#removemessage)**: An abstraction used to remove a message from chat history, used primarily in LangGraph.
-- **[role](/docs/concepts/messages#role)**: Represents the role (e.g., user, assistant) of a chat message.
-- **[RunnableConfig](/docs/concepts/runnables/#runnableconfig)**: Use to pass run time information to Runnables (e.g., `run_name`, `run_id`, `tags`, `metadata`, `max_concurrency`, `recursion_limit`, `configurable`).
-- **[Standard parameters for chat models](/docs/concepts/chat_models#standard-parameters)**: Parameters such as API key, `temperature`, and `max_tokens`.
-- **[Standard tests](/docs/concepts/testing#standard-tests)**: A defined set of unit and integration tests that all integrations must pass.
-- **[stream](/docs/concepts/streaming)**: Use to stream output from a Runnable or a graph.
-- **[Tokenization](/docs/concepts/tokens)**: The process of converting data into tokens and vice versa.
-- **[Tokens](/docs/concepts/tokens)**: The basic unit that a language model reads, processes, and generates under the hood.
-- **[Tool artifacts](/docs/concepts/tools#tool-artifacts)**: Add artifacts to the output of a tool that will not be sent to the model, but will be available for downstream processing.
-- **[Tool binding](/docs/concepts/tool_calling#tool-binding)**: Binding tools to models.
-- **[@tool](/docs/concepts/tools/#create-tools-using-the-tool-decorator)**: Decorator for creating tools in LangChain.
-- **[Toolkits](/docs/concepts/tools#toolkits)**: A collection of tools that can be used together.
-- **[ToolMessage](/docs/concepts/messages#toolmessage)**: Represents a message that contains the results of a tool execution.
-- **[Unit tests](/docs/concepts/testing#unit-tests)**: Tests that verify the correctness of individual components, run in isolation without access to the Internet.
-- **[Vector stores](/docs/concepts/vectorstores)**: Datastores specialized for storing and efficiently searching vector embeddings.
-- **[with_structured_output](/docs/concepts/structured_outputs/#structured-output-method)**: A helper method for chat models that natively support [tool calling](/docs/concepts/tool_calling) to get structured output matching a given schema specified via Pydantic, JSON schema or a function.
-- **[with_types](/docs/concepts/runnables#with_types)**: Method to overwrite the input and output types of a runnable. Useful when working with complex LCEL chains and deploying with LangServe.
diff --git a/langchain_md_files/concepts/key_value_stores.mdx b/langchain_md_files/concepts/key_value_stores.mdx
deleted file mode 100644
index 2f5b7e640e7d5e7ba0fa141e61669e39152488d9..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/key_value_stores.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# Key-value stores
-
-## Overview
-
-LangChain provides a key-value store interface for storing and retrieving data.
-
-LangChain includes a [`BaseStore`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) interface,
-which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a
-more specific `BaseStore[str, bytes]` instance that stores binary data (referred to as a `ByteStore`), and internally take care of
-encoding and decoding data for their specific needs.
-
-This means that as a user, you only need to think about one type of store rather than different ones for different types of data.
-
-## Usage
-
-The key-value store interface in LangChain is used primarily for:
-
-1. Caching [embeddings](/docs/concepts/embedding_models) via [CacheBackedEmbeddings](https://python.langchain.com/api_reference/langchain/embeddings/langchain.embeddings.cache.CacheBackedEmbeddings.html#langchain.embeddings.cache.CacheBackedEmbeddings) to avoid recomputing embeddings for repeated queries or when re-indexing content (see the sketch after this list).
-
-2. As a simple [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) persistence layer in some retrievers.
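-
-A minimal sketch of the embedding-caching usage (the use of `OpenAIEmbeddings` is an assumption; any embedding model and any `ByteStore` work):
-
-```python
-from langchain.embeddings import CacheBackedEmbeddings
-from langchain_core.stores import InMemoryByteStore
-from langchain_openai import OpenAIEmbeddings  # assumption: any embedding model works
-
-underlying = OpenAIEmbeddings()
-store = InMemoryByteStore()  # any ByteStore works here
-
-cached_embedder = CacheBackedEmbeddings.from_bytes_store(
-    underlying, store, namespace=underlying.model
-)
-
-# The second call is served from the key-value store instead of re-computing.
-vectors = cached_embedder.embed_documents(["hello world"])
-vectors = cached_embedder.embed_documents(["hello world"])
-```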
-
-Please see these how-to guides for more information:
-
-* [How to cache embeddings guide](/docs/how_to/caching_embeddings/).
-* [How to retrieve using multiple vectors per document](/docs/how_to/custom_retriever/).
-
-## Interface
-
-All [`BaseStores`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows for modifying **multiple** key-value pairs at once:
-
-- `mget(keys: Sequence[str]) -> List[Optional[bytes]]`: get the contents of multiple keys, returning `None` for keys that do not exist
-- `mset(key_value_pairs: Sequence[Tuple[str, bytes]]) -> None`: set the contents of multiple keys
-- `mdelete(keys: Sequence[str]) -> None`: delete multiple keys
-- `yield_keys(prefix: Optional[str] = None) -> Iterator[str]`: yield all keys in the store, optionally filtering by a prefix
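-
-A short usage sketch with the in-memory implementation looks like this:
-
-```python
-from langchain_core.stores import InMemoryByteStore
-
-store = InMemoryByteStore()
-
-store.mset([("key1", b"value1"), ("key2", b"value2")])
-store.mget(["key1", "key2", "missing"])  # [b'value1', b'value2', None]
-store.mdelete(["key1"])
-list(store.yield_keys())                 # ['key2']
-```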
-
-## Integrations
-
-Please reference the [stores integration page](/docs/integrations/stores/) for a list of available key-value store integrations.
diff --git a/langchain_md_files/concepts/lcel.mdx b/langchain_md_files/concepts/lcel.mdx
deleted file mode 100644
index d7701c0ad51ad69f69f71e1c2bef40b30b6e3c00..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/lcel.mdx
+++ /dev/null
@@ -1,221 +0,0 @@
-# LangChain Expression Language (LCEL)
-
-:::info Prerequisites
-* [Runnable Interface](/docs/concepts/runnables)
-:::
-
-The **L**ang**C**hain **E**xpression **L**anguage (LCEL) takes a [declarative](https://en.wikipedia.org/wiki/Declarative_programming) approach to building new [Runnables](/docs/concepts/runnables) from existing Runnables.
-
-This means that you describe what *should* happen, rather than *how* it should happen, allowing LangChain to optimize the run-time execution of the chains.
-
-We often refer to a `Runnable` created using LCEL as a "chain". It's important to remember that a "chain" is a `Runnable` and implements the full [Runnable Interface](/docs/concepts/runnables).
-
-:::note
-* The [LCEL cheatsheet](/docs/how_to/lcel_cheatsheet/) shows common patterns that involve the Runnable interface and LCEL expressions.
-* Please see the following list of [how-to guides](/docs/how_to/#langchain-expression-language-lcel) that cover common tasks with LCEL.
-* A list of built-in `Runnables` can be found in the [LangChain Core API Reference](https://python.langchain.com/api_reference/core/runnables.html). Many of these Runnables are useful when composing custom "chains" in LangChain using LCEL.
-:::
-
-## Benefits of LCEL
-
-LangChain optimizes the run-time execution of chains built with LCEL in a number of ways:
-
-- **Optimized parallel execution**: Run Runnables in parallel using [RunnableParallel](#runnableparallel) or run multiple inputs through a given chain in parallel using the [Runnable Batch API](/docs/concepts/runnables/#optimized-parallel-execution-batch). Parallel execution can significantly reduce the latency as processing can be done in parallel instead of sequentially.
-- **Guaranteed Async support**: Any chain built with LCEL can be run asynchronously using the [Runnable Async API](/docs/concepts/runnables/#asynchronous-support). This can be useful when running chains in a server environment where you want to handle a large number of requests concurrently.
-- **Simplify streaming**: LCEL chains can be streamed, allowing for incremental output as the chain is executed. LangChain can optimize the streaming of the output to minimize the time-to-first-token (the time elapsed until the first chunk of output from a [chat model](/docs/concepts/chat_models) or [llm](/docs/concepts/text_llms) comes out).
-
-Other benefits include:
-
-- [**Seamless LangSmith tracing**](https://docs.smith.langchain.com)
-As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
-With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.
-- **Standard API**: Because all chains are built using the Runnable interface, they can be used in the same way as any other Runnable.
-- [**Deployable with LangServe**](/docs/concepts/architecture#langserve): Chains built with LCEL can be deployed with LangServe for production use.
-
-## Should I use LCEL?
-
-LCEL is an [orchestration solution](https://en.wikipedia.org/wiki/Orchestration_(computing)) -- it allows LangChain to handle run-time execution of chains in an optimized way.
-
-While we have seen users run chains with hundreds of steps in production, we generally recommend using LCEL for simpler orchestration tasks. When the application requires complex state management, branching, cycles or multiple agents, we recommend that users take advantage of [LangGraph](/docs/concepts/architecture#langgraph).
-
-In LangGraph, users define graphs that specify the application's flow. This allows users to keep using LCEL within individual nodes when LCEL is needed, while making it easy to define complex orchestration logic that is more readable and maintainable.
-
-Here are some guidelines:
-
-* If you are making a single LLM call, you don't need LCEL; instead call the underlying [chat model](/docs/concepts/chat_models) directly.
-* If you have a simple chain (e.g., prompt + llm + parser, a simple retrieval setup, etc.), LCEL is a reasonable fit, provided you're taking advantage of the LCEL benefits (see the sketch after this list).
-* If you're building a complex chain (e.g., with branching, cycles, multiple agents, etc.) use [LangGraph](/docs/concepts/architecture#langgraph) instead. Remember that you can always use LCEL within individual nodes in LangGraph.
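-
-As a sketch of the "simple chain" case (the prompt text is illustrative, and `ChatOpenAI` is an assumption; any chat model works):
-
-```python
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_openai import ChatOpenAI  # assumption: any chat model works
-
-prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
-model = ChatOpenAI(model="gpt-4o-mini")
-parser = StrOutputParser()
-
-# prompt + llm + parser composed with LCEL
-chain = prompt | model | parser
-chain.invoke({"topic": "bears"})
-```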
-
-## Composition Primitives
-
-`LCEL` chains are built by composing existing `Runnables` together. The two main composition primitives are [RunnableSequence](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableSequence.html#langchain_core.runnables.base.RunnableSequence) and [RunnableParallel](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableParallel.html#langchain_core.runnables.base.RunnableParallel).
-
-Many other composition primitives (e.g., [RunnableAssign](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnableAssign.html#langchain_core.runnables.passthrough.RunnableAssign)) can be thought of as variations of these two primitives.
-
-:::note
-You can find a list of all composition primitives in the [LangChain Core API Reference](https://python.langchain.com/api_reference/core/runnables.html).
-:::
-
-### RunnableSequence
-
-`RunnableSequence` is a composition primitive that allows you to "chain" multiple runnables sequentially, with the output of one runnable serving as the input to the next.
-
-```python
-from langchain_core.runnables import RunnableSequence
-chain = RunnableSequence(runnable1, runnable2)
-```
-
-Invoking the `chain` with some input:
-
-```python
-final_output = chain.invoke(some_input)
-```
-
-corresponds to the following:
-
-```python
-output1 = runnable1.invoke(some_input)
-final_output = runnable2.invoke(output1)
-```
-
-:::note
-`runnable1` and `runnable2` are placeholders for any `Runnable` that you want to chain together.
-:::
-
-### RunnableParallel
-
-`RunnableParallel` is a composition primitive that allows you to run multiple runnables concurrently, with the same input provided to each.
-
-```python
-from langchain_core.runnables import RunnableParallel
-chain = RunnableParallel({
-    "key1": runnable1,
-    "key2": runnable2,
-})
-```
-
-Invoking the `chain` with some input:
-
-```python
-final_output = chain.invoke(some_input)
-```
-
-Will yield a `final_output` dictionary with the same keys as the mapping passed to `RunnableParallel`, with each value replaced by the output of the corresponding runnable run on `some_input`.
-
-```python
-{
-    "key1": runnable1.invoke(some_input),
-    "key2": runnable2.invoke(some_input),
-}
-```
-
-Recall that the runnables are executed in parallel, so while the result is the same as
-the dictionary construction shown above, the execution time is much faster.
-
-:::note
-`RunnableParallel` supports both synchronous and asynchronous execution (as all `Runnables` do).
-
-* For synchronous execution, `RunnableParallel` uses a [ThreadPoolExecutor](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor) to run the runnables concurrently.
-* For asynchronous execution, `RunnableParallel` uses [asyncio.gather](https://docs.python.org/3/library/asyncio.html#asyncio.gather) to run the runnables concurrently.
-:::
-
-## Composition Syntax
-
-The usage of `RunnableSequence` and `RunnableParallel` is so common that we created a shorthand syntax for using them. This helps
-to make the code more readable and concise.
-
-### The `|` operator
-
-We have [overloaded](https://docs.python.org/3/reference/datamodel.html#special-method-names) the `|` operator to create a `RunnableSequence` from two `Runnables`.
-
-```python
-chain = runnable1 | runnable2
-```
-
-is equivalent to:
-
-```python
-chain = RunnableSequence(runnable1, runnable2)
-```
-
-### The `.pipe` method
-
-If you have moral qualms with operator overloading, you can use the `.pipe` method instead. This is equivalent to the `|` operator.
-
-```python
-chain = runnable1.pipe(runnable2)
-```
-
-### Coercion
-
-LCEL applies automatic type coercion to make it easier to compose chains.
-
-If you do not understand the type coercion, you can always use the `RunnableSequence` and `RunnableParallel` classes directly.
-
-This will make the code more verbose, but it will also make it more explicit.
-
-#### Dictionary to RunnableParallel
-
-Inside an LCEL expression, a dictionary is automatically converted to a `RunnableParallel`.
-
-For example, the following code:
-
-```python
-mapping = {
-    "key1": runnable1,
-    "key2": runnable2,
-}
-
-chain = mapping | runnable3
-```
-
-is automatically converted to the following:
-
-```python
-chain = RunnableSequence(RunnableParallel(mapping), runnable3)
-```
-
-:::caution
-You have to be careful because the `mapping` dictionary itself is not a `RunnableParallel` object; it is just a dictionary. This means that the following code will raise an `AttributeError`:
-
-```python
-mapping.invoke(some_input)
-```
-:::
-
-#### Function to RunnableLambda
-
-Inside an LCEL expression, a function is automatically converted to a `RunnableLambda`.
-
-```python
-def some_func(x):
-    return x
-
-chain = some_func | runnable1
-```
-
-This is automatically converted to the following:
-
-```python
-chain = RunnableSequence(RunnableLambda(some_func), runnable1)
-```
-
-:::caution
-You have to be careful because a plain function or lambda is not a `RunnableLambda` object; it is just a function. This means that the following code will raise an `AttributeError`:
-
-```python
-(lambda x: x + 1).invoke(some_input)
-```
-:::
-
-## Legacy chains
-
-LCEL aims to provide consistency around behavior and customization over legacy subclassed chains such as `LLMChain` and
-`ConversationalRetrievalChain`. Many of these legacy chains hide important details like prompts, and as a wider variety
-of viable models emerge, customization has become more and more important.
-
-If you are currently using one of these legacy chains, please see [this guide for guidance on how to migrate](/docs/versions/migrating_chains).
-
-For guides on how to do specific tasks with LCEL, check out [the relevant how-to guides](/docs/how_to/#langchain-expression-language-lcel).
diff --git a/langchain_md_files/concepts/messages.mdx b/langchain_md_files/concepts/messages.mdx
deleted file mode 100644
index c8765ab3d3471d0ceb001cc3cb9058d7503f61ff..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/messages.mdx
+++ /dev/null
@@ -1,245 +0,0 @@
-# Messages
-
-:::info Prerequisites
-- [Chat Models](/docs/concepts/chat_models)
-:::
-
-## Overview
-
-Messages are the unit of communication in [chat models](/docs/concepts/chat_models). They are used to represent the input and output of a chat model, as well as any additional context or metadata that may be associated with a conversation.
-
-Each message has a **role** (e.g., "user", "assistant") and **content** (e.g., text, multimodal data) with additional metadata that varies depending on the chat model provider.
-
-LangChain provides a unified message format that can be used across chat models, allowing users to work with different chat models without worrying about the specific details of the message format used by each model provider.
-
-## What is inside a message?
-
-A message typically consists of the following pieces of information:
-
-- **Role**: The role of the message (e.g., "user", "assistant").
-- **Content**: The content of the message (e.g., text, multimodal data).
-- Additional metadata: id, name, [token usage](/docs/concepts/tokens) and other model-specific metadata.
-
-### Role
-
-Roles are used to distinguish between different types of messages in a conversation and help the chat model understand how to respond to a given sequence of messages.
-
-| **Role**              | **Description**                                                                                                                                                                                                 |
-|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **system**            | Used to tell the chat model how to behave and provide additional context. Not supported by all chat model providers.                                                                                            |
-| **user**              | Represents input from a user interacting with the model, usually in the form of text or other interactive input.                                                                                                |
-| **assistant**         | Represents a response from the model, which can include text or a request to invoke tools.                                                                                                                      |
-| **tool**              | A message used to pass the results of a tool invocation back to the model after external data or processing has been retrieved. Used with chat models that support [tool calling](/docs/concepts/tool_calling). |
-| **function** (legacy) | This is a legacy role, corresponding to OpenAI's legacy function-calling API. **tool** role should be used instead.                                                                                             |
-
-### Content
-
-The content of a message is either text or a list of dictionaries representing [multimodal data](/docs/concepts/multimodality) (e.g., images, audio, video). The exact format of the content can vary between different chat model providers.
-
-Currently, most chat models support text as the primary content type, with some models also supporting multimodal data. However, support for multimodal data is still limited across most chat model providers.
-
-For more information see:
-* [SystemMessage](#systemmessage) -- for content which should be passed to direct the conversation
-* [HumanMessage](#humanmessage) -- for content in the input from the user.
-* [AIMessage](#aimessage) -- for content in the response from the model.
-* [Multimodality](/docs/concepts/multimodality) -- for more information on multimodal content.
-
-### Other Message Data
-
-Depending on the chat model provider, messages can include other data such as:
-
-- **ID**: An optional unique identifier for the message.
-- **Name**: An optional `name` property which makes it possible to differentiate between entities/speakers with the same role. Not all models support this!
-- **Metadata**: Additional information about the message, such as timestamps, token usage, etc.
-- **Tool Calls**: A request made by the model to call one or more tools. See [tool calling](/docs/concepts/tool_calling) for more information.
-
-## Conversation Structure
-
-The sequence of messages passed into a chat model should follow a specific structure to ensure that the chat model can generate a valid response.
-
-For example, a typical conversation structure might look like this:
-
-1. **User Message**: "Hello, how are you?"
-2. **Assistant Message**: "I'm doing well, thank you for asking."
-3. **User Message**: "Can you tell me a joke?"
-4. **Assistant Message**: "Sure! Why did the scarecrow win an award? Because he was outstanding in his field!"
-
-Please read the [chat history](/docs/concepts/chat_history) guide for more information on managing chat history and ensuring that the conversation structure is correct.
-
-## LangChain Messages
-
-LangChain provides a unified message format that can be used across all chat models, allowing users to work with different chat models without worrying about the specific details of the message format used by each model provider.
-
-LangChain messages are Python objects that subclass from a [BaseMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.base.BaseMessage.html).
-
-The five main message types are:
-
-- [SystemMessage](#systemmessage): corresponds to **system** role
-- [HumanMessage](#humanmessage): corresponds to **user** role
-- [AIMessage](#aimessage): corresponds to **assistant** role
-- [AIMessageChunk](#aimessagechunk): corresponds to **assistant** role, used for [streaming](/docs/concepts/streaming) responses
-- [ToolMessage](#toolmessage): corresponds to **tool** role
-
-Other important messages include:
-
-- [RemoveMessage](#removemessage) -- does not correspond to any role. This is an abstraction, mostly used in [LangGraph](/docs/concepts/architecture#langgraph) to manage chat history.
-- **Legacy** [FunctionMessage](#legacy-functionmessage): corresponds to the **function** role in OpenAI's **legacy** function-calling API.
-
-You can find more information about **messages** in the [API Reference](https://python.langchain.com/api_reference/core/messages.html).
-
-### SystemMessage
-
-A `SystemMessage` is used to prime the behavior of the AI model and provide additional context, such as instructing the model to adopt a specific persona or setting the tone of the conversation (e.g., "This is a conversation about cooking").
-
-Different chat providers may support system messages in one of the following ways:
-
-* **Through a "system" message role**: In this case, a system message is included as part of the message sequence with the role explicitly set as "system."
-* **Through a separate API parameter for system instructions**: Instead of being included as a message, system instructions are passed via a dedicated API parameter.
-* **No support for system messages**: Some models do not support system messages at all.
-
-Most major chat model providers support system instructions via either a chat message or a separate API parameter. LangChain will automatically adapt based on the provider’s capabilities. If the provider supports a separate API parameter for system instructions, LangChain will extract the content of a system message and pass it through that parameter.
-
-If no system message is supported by the provider, in most cases LangChain will attempt to incorporate the system message's content into a HumanMessage or raise an exception if that is not possible. However, this behavior is not yet consistently enforced across all implementations, and if using a less popular implementation of a chat model (e.g., an implementation from the `langchain-community` package) it is recommended to check the specific documentation for that model.
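-
-Regardless of how the provider handles system instructions, usage from LangChain looks the same (a sketch, assuming `model` is any chat model):
-
-```python
-from langchain_core.messages import HumanMessage, SystemMessage
-
-messages = [
-    SystemMessage("You are a helpful assistant that answers in one sentence."),
-    HumanMessage("Hello, how are you?"),
-]
-model.invoke(messages)
-```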
-
-### HumanMessage
-
-The `HumanMessage` corresponds to the **"user"** role. A human message represents input from a user interacting with the model.
-
-#### Text Content
-
-Most chat models expect the user input to be in the form of text.
-
-```python
-from langchain_core.messages import HumanMessage
-
-model.invoke([HumanMessage(content="Hello, how are you?")])
-```
-
-:::tip
-When invoking a chat model with a string as input, LangChain will automatically convert the string into a `HumanMessage` object. This is mostly useful for quick testing.
-
-```python
-model.invoke("Hello, how are you?")
-```
-:::
-
-#### Multi-modal Content
-
-Some chat models accept multimodal inputs, such as images, audio, video, or files like PDFs.
-
-Please see the [multimodality](/docs/concepts/multimodality) guide for more information.
-
-### AIMessage
-
-`AIMessage` is used to represent a message with the role **"assistant"**. This is the response from the model, which can include text or a request to invoke tools. It could also include other media types like images, audio, or video -- though this is still uncommon at the moment.
-
-```python
-from langchain_core.messages import HumanMessage
-ai_message = model.invoke([HumanMessage("Tell me a joke")])
-ai_message # <-- AIMessage
-```
-
-An `AIMessage` has the following attributes. The attributes which are **standardized** are the ones that LangChain attempts to standardize across different chat model providers. **raw** fields are specific to the model provider and may vary.
-
-| Attribute            | Standardized/Raw | Description                                                                                                                                                                                                             |
-|----------------------|:-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `content`            | Raw              | Usually a string, but can be a list of content blocks. See [content](#content) for details.                                                                                                                             |
-| `tool_calls`         | Standardized     | Tool calls associated with the message. See [tool calling](/docs/concepts/tool_calling) for details.                                                                                                                    |
-| `invalid_tool_calls` | Standardized     | Tool calls with parsing errors associated with the message. See [tool calling](/docs/concepts/tool_calling) for details.                                                                                                |
-| `usage_metadata`     | Standardized     | Usage metadata for a message, such as [token counts](/docs/concepts/tokens). See [Usage Metadata API Reference](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html). |
-| `id`                 | Standardized     | An optional unique identifier for the message, ideally provided by the provider/model that created the message.                                                                                                         |
-| `response_metadata`  | Raw              | Response metadata, e.g., response headers, logprobs, token counts.                                                                                                                                                      |
-
-#### content
-
-The **content** property of an `AIMessage` represents the response generated by the chat model.
-
-The content is either:
-
-- **text** -- the norm for virtually all chat models.
-- A **list of dictionaries** -- Each dictionary represents a content block and is associated with a `type`.
-    * Used by Anthropic for surfacing agent thought process when doing [tool calling](/docs/concepts/tool_calling).
-    * Used by OpenAI for audio outputs. Please see [multi-modal content](/docs/concepts/multimodality) for more information.
-
-:::important
-The **content** property is **not** standardized across different chat model providers, mostly because there are
-still few examples to generalize from.
-:::
-
-### AIMessageChunk
-
-It is common to [stream](/docs/concepts/streaming) responses from the chat model as they are being generated, so the user can see the response in real-time instead of waiting for the entire response to be generated before displaying it.
-
-An `AIMessageChunk` is returned from the `stream`, `astream` and `astream_events` methods of the chat model.
-
-For example,
-
-```python
-for chunk in model.stream([HumanMessage("what color is the sky?")]):
-    print(chunk)
-```
-
-`AIMessageChunk` follows nearly the same structure as `AIMessage`, but uses a different [ToolCallChunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk)
-to be able to stream tool calling in a standardized manner.
-
-
-#### Aggregating
-
-`AIMessageChunks` support the `+` operator to merge them into a single `AIMessage`. This is useful when you want to display the final response to the user.
-
-```python
-ai_message = chunk1 + chunk2 + chunk3 + ...
-```
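-
-For example, chunks can be accumulated while streaming (a sketch, assuming `model` is any chat model):
-
-```python
-from langchain_core.messages import HumanMessage
-
-full = None
-for chunk in model.stream([HumanMessage("what color is the sky?")]):
-    full = chunk if full is None else full + chunk
-
-full  # a single aggregated AIMessageChunk containing the complete response
-```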
-
-### ToolMessage
-
-This represents a message with role "tool", which contains the result of [calling a tool](/docs/concepts/tool_calling). In addition to `role` and `content`, this message has:
-
-- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
-- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
-
-Please see [tool calling](/docs/concepts/tool_calling) for more information.
-
-### RemoveMessage
-
-This is a special message type that does not correspond to any roles. It is used
-for managing chat history in [LangGraph](/docs/concepts/architecture#langgraph).
-
-Please see the following for more information on how to use the `RemoveMessage`:
-
-* [Memory conceptual guide](https://langchain-ai.github.io/langgraph/concepts/memory/)
-* [How to delete messages](https://langchain-ai.github.io/langgraph/how-tos/memory/delete-messages/)
-
-### (Legacy) FunctionMessage
-
-This is a legacy message type, corresponding to OpenAI's legacy function-calling API. `ToolMessage` should be used instead to correspond to the updated tool-calling API.
-
-## OpenAI Format
-
-### Inputs
-
-Chat models also accept OpenAI's message format as **input**:
-
-```python
-chat_model.invoke([
-    {
-        "role": "user",
-        "content": "Hello, how are you?",
-    },
-    {
-        "role": "assistant",
-        "content": "I'm doing well, thank you for asking.",
-    },
-    {
-        "role": "user",
-        "content": "Can you tell me a joke?",
-    }
-])
-```
-
-### Outputs
-
-At the moment, the output of the model will be in terms of LangChain messages, so you will need to convert it yourself if you
-need the output in OpenAI format as well.
-
-The [convert_to_openai_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.convert_to_openai_messages.html) utility function can be used to convert from LangChain messages to OpenAI format.
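-
-A sketch of the conversion (assuming `ai_message` is an `AIMessage` returned by a chat model):
-
-```python
-from langchain_core.messages.utils import convert_to_openai_messages
-
-convert_to_openai_messages([ai_message])
-# [{'role': 'assistant', 'content': '...'}]
-```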
diff --git a/langchain_md_files/concepts/multimodality.mdx b/langchain_md_files/concepts/multimodality.mdx
deleted file mode 100644
index c74c697e8bc8dfe9d424b52f6234f100882080e7..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/multimodality.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
-# Multimodality
-
-## Overview
-
-**Multimodality** refers to the ability to work with data that comes in different forms, such as text, audio, images, and video. Multimodality can appear in various components, allowing models and systems to handle and process a mix of these data types seamlessly.
-
-- **Chat Models**: These could, in theory, accept and generate multimodal inputs and outputs, handling a variety of data types like text, images, audio, and video.
-- **Embedding Models**: Embedding Models can represent multimodal content, embedding various forms of data—such as text, images, and audio—into vector spaces.
-- **Vector Stores**: Vector stores could search over embeddings that represent multimodal data, enabling retrieval across different types of information.
-
-## Multimodality in chat models
-
-:::info Pre-requisites
-* [Chat models](/docs/concepts/chat_models)
-* [Messages](/docs/concepts/messages)
-:::
- 
-Multimodal support is still relatively new and less common, and model providers have not yet standardized on the "best" way to define the API. As such, LangChain's multimodal abstractions are lightweight and flexible, designed to accommodate different model providers' APIs and interaction patterns, but are **not** standardized across models.
-
-### How to use multimodal models
-
-* Use the [chat model integration table](/docs/integrations/chat/) to identify which models support multimodality.
-* Reference the [relevant how-to guides](/docs/how_to/#multimodal) for specific examples of how to use multimodal models.
-
-### What kind of multimodality is supported?
-
-#### Inputs
-
-Some models can accept multimodal inputs, such as images, audio, video, or files. The types of multimodal inputs supported depend on the model provider. For instance, [Google's Gemini](/docs/integrations/chat/google_generative_ai/) supports documents like PDFs as inputs.
-
-Most chat models that support **multimodal inputs** also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
-
-The gist of passing multimodal inputs to a chat model is to use content blocks that specify a type and corresponding data. For example, to pass an image to a chat model:
-
-```python
-from langchain_core.messages import HumanMessage
-
-message = HumanMessage(
-    content=[
-        {"type": "text", "text": "describe the weather in this image"},
-        {"type": "image_url", "image_url": {"url": image_url}},
-    ],
-)
-response = model.invoke([message])
-```
-
-:::caution
-The exact format of the content blocks may vary depending on the model provider. Please refer to the chat model's
-integration documentation for the correct format. Find the integration in the [chat model integration table](/docs/integrations/chat/).
-:::
-
-#### Outputs
-
-Virtually no popular chat models support multimodal outputs at the time of writing (October 2024). 
-
-The only exception is OpenAI's chat model ([gpt-4o-audio-preview](/docs/integrations/chat/openai/)), which can generate audio outputs.
-
-Multimodal outputs will appear as part of the [AIMessage](/docs/concepts/messages/#aimessage) response object.
-
-Please see the [ChatOpenAI](/docs/integrations/chat/openai/) integration page for more information on how to use multimodal outputs.
-
-#### Tools
-
-Currently, no chat model is designed to work **directly** with multimodal data in a [tool call request](/docs/concepts/tool_calling) or [ToolMessage](/docs/concepts/tool_calling) result.
-
-However, a chat model can easily interact with multimodal data by invoking tools with references (e.g., a URL) to the multimodal data, rather than the data itself. For example, any model capable of [tool calling](/docs/concepts/tool_calling) can be equipped with tools to download and process images, audio, or video.
-
-## Multimodality in embedding models
-
-:::info Prerequisites
-* [Embedding Models](/docs/concepts/embedding_models)
-:::
-
-**Embeddings** are vector representations of data used for tasks like similarity search and retrieval.
-
-The current [embedding interface](https://python.langchain.com/api_reference/core/embeddings/langchain_core.embeddings.embeddings.Embeddings.html#langchain_core.embeddings.embeddings.Embeddings) used in LangChain is optimized entirely for text-based data, and will **not** work with multimodal data.
-
-As use cases involving multimodal search and retrieval tasks become more common, we expect to expand the embedding interface to accommodate other data types like images, audio, and video.
-
-## Multimodality in vector stores
-
-:::info Prerequisites
-* [Vector stores](/docs/concepts/vectorstores)
-:::
-
-Vector stores are databases for storing and retrieving embeddings, which are typically used in search and retrieval tasks. Similar to embeddings, vector stores are currently optimized for text-based data.
-
-As use cases involving multimodal search and retrieval tasks become more common, we expect to expand the vector store interface to accommodate other data types like images, audio, and video.
diff --git a/langchain_md_files/concepts/output_parsers.mdx b/langchain_md_files/concepts/output_parsers.mdx
deleted file mode 100644
index 254b3bcf3fc1f5a5cd877ef468830d99bb613c2a..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/output_parsers.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# Output parsers
-
-<span data-heading-keywords="output parser"></span>
-
-:::note
-
-The information here refers to parsers that take a text output from a model and try to parse it into a more structured representation.
-More and more models are supporting function (or tool) calling, which handles this automatically.
-It is recommended to use function/tool calling rather than output parsing.
-See documentation for that [here](/docs/concepts/tool_calling).
-
-:::
-
-An output parser is responsible for taking the output of a model and transforming it into a more suitable format for downstream tasks.
-Output parsers are useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.
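-
-For example, a sketch of parsing a model's text output into JSON (the JSON string below stands in for a real model response):
-
-```python
-from langchain_core.output_parsers import JsonOutputParser
-
-parser = JsonOutputParser()
-
-# Stand-in for the text a model might return.
-text = '{"setup": "Why did the scarecrow win an award?", "punchline": "He was outstanding in his field."}'
-
-parser.invoke(text)
-# {'setup': 'Why did the scarecrow win an award?', 'punchline': 'He was outstanding in his field.'}
-```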
-
-LangChain has many different types of output parsers. The table below lists the output parsers LangChain supports, along with the following information:
-
-- **Name**: The name of the output parser
-- **Supports Streaming**: Whether the output parser supports streaming.
-- **Has Format Instructions**: Whether the output parser has format instructions. This is generally available except when (a) the desired schema is not specified in the prompt but rather in other parameters (like OpenAI function calling), or (b) when the OutputParser wraps another OutputParser.
-- **Calls LLM**: Whether this output parser itself calls an LLM. This is usually only done by output parsers that attempt to correct misformatted output.
-- **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific kwargs.
-- **Output Type**: The output type of the object returned by the parser.
-- **Description**: Our commentary on this output parser and when to use it.
-
-| Name                                                                                                                                                                                                                                    | Supports Streaming | Has Format Instructions | Calls LLM | Input Type         | Output Type          | Description                                                                                                                                                                                                                                              |
-|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|-------------------------|-----------|--------------------|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Str](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.string.StrOutputParser.html)                                                                                                         | ✅                  |                         |           | `str` \| `Message` | String                | Parses texts from message objects. Useful for handling variable formats of message content (e.g., extracting text from content blocks).                                                                                                                |
-| [JSON](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html)                                                     | ✅                  | ✅                       |           | `str` \| `Message` | JSON object          | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling.                                    |
-| [XML](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser)                                                          | ✅                  | ✅                       |           | `str` \| `Message` | `dict`               | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's).                                                                                                                            |
-| [CSV](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser)                          | ✅                  | ✅                       |           | `str` \| `Message` | `List[str]`          | Returns a list of comma separated values.                                                                                                                                                                                                                |
-| [OutputFixing](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser)                                                |                    |                         | ✅         | `str` \| `Message` |                      | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output.                                                                                              |
-| [RetryWithError](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser)                          |                    |                         | ✅         | `str` \| `Message` |                      | Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. |
-| [Pydantic](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser)                                 |                    | ✅                       |           | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format.                                                                                                                                                                                     |
-| [YAML](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser)                                                          |                    | ✅                       |           | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it.                                                                                                                                                             |
-| [PandasDataFrame](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) |                    | ✅                       |           | `str` \| `Message` | `dict`               | Useful for doing operations with pandas DataFrames.                                                                                                                                                                                                      |
-| [Enum](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser)                                                          |                    | ✅                       |           | `str` \| `Message` | `Enum`               | Parses response into one of the provided enum values.                                                                                                                                                                                                    |
-| [Datetime](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser)                                      |                    | ✅                       |           | `str` \| `Message` | `datetime.datetime`  | Parses the response into a `datetime.datetime` object.                                                                                                                                                                                                   |
-| [Structured](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser)                            |                    | ✅                       |           | `str` \| `Message` | `Dict[str, str]`     | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs.                                            |
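-
-As a quick illustration of the pattern, here is a minimal sketch of the Pydantic parser (the `Joke` schema is a hypothetical example):
-
-```python
-from pydantic import BaseModel, Field
-from langchain_core.output_parsers import PydanticOutputParser
-
-# Define the desired output schema
-class Joke(BaseModel):
-    setup: str = Field(description="The setup of the joke")
-    punchline: str = Field(description="The punchline of the joke")
-
-parser = PydanticOutputParser(pydantic_object=Joke)
-
-# Format instructions can be embedded in a prompt to steer the model
-print(parser.get_format_instructions())
-
-# Parse raw model output into the schema
-joke = parser.parse('{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}')
-```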
-
-For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
diff --git a/langchain_md_files/concepts/prompt_templates.mdx b/langchain_md_files/concepts/prompt_templates.mdx
deleted file mode 100644
index b8bb74314db2d0c02fe97ece2b36f65f65a6d491..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/prompt_templates.mdx
+++ /dev/null
@@ -1,79 +0,0 @@
-# Prompt Templates
-
-Prompt templates help to translate user input and parameters into instructions for a language model.
-This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
-
-Prompt Templates take as input a dictionary, where each key represents a variable in the prompt template to fill in.
-
-Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages.
-The reason this PromptValue exists is to make it easy to switch between strings and messages.
-
-There are a few different types of prompt templates:
-
-## String PromptTemplates
-
-These prompt templates are used to format a single string, and generally are used for simpler inputs.
-For example, a common way to construct and use a PromptTemplate is as follows:
-
-```python
-from langchain_core.prompts import PromptTemplate
-
-prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
-
-prompt_template.invoke({"topic": "cats"})
-```
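-
-The returned PromptValue can then be converted to either representation:
-
-```python
-prompt_value = prompt_template.invoke({"topic": "cats"})
-
-prompt_value.to_string()    # "Tell me a joke about cats"
-prompt_value.to_messages()  # [HumanMessage(content="Tell me a joke about cats")]
-```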
-
-## ChatPromptTemplates
-
-These prompt templates are used to format a list of messages. These "templates" consist of a list of templates themselves.
-For example, a common way to construct and use a ChatPromptTemplate is as follows:
-
-```python
-from langchain_core.prompts import ChatPromptTemplate
-
-prompt_template = ChatPromptTemplate([
-    ("system", "You are a helpful assistant"),
-    ("user", "Tell me a joke about {topic}")
-])
-
-prompt_template.invoke({"topic": "cats"})
-```
-
-In the above example, this ChatPromptTemplate will construct two messages when called.
-The first is a system message, that has no variables to format.
-The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in.
-
-## MessagesPlaceholder
-<span data-heading-keywords="messagesplaceholder"></span>
-
-This prompt template is responsible for adding a list of messages in a particular place.
-In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
-But what if we wanted the user to pass in a list of messages that we would slot into a particular spot?
-This is how you use MessagesPlaceholder.
-
-```python
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.messages import HumanMessage
-
-prompt_template = ChatPromptTemplate([
-    ("system", "You are a helpful assistant"),
-    MessagesPlaceholder("msgs")
-])
-
-prompt_template.invoke({"msgs": [HumanMessage(content="hi!")]})
-```
-
-This will produce a list of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.
-If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).
-This is useful for letting a list of messages be slotted into a particular spot.
-
-An alternative way to accomplish the same thing without using the `MessagesPlaceholder` class explicitly is:
-
-```python
-prompt_template = ChatPromptTemplate([
-    ("system", "You are a helpful assistant"),
-    ("placeholder", "{msgs}") # <-- This is the changed part
-])
-```
-
-For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
diff --git a/langchain_md_files/concepts/rag.mdx b/langchain_md_files/concepts/rag.mdx
deleted file mode 100644
index 0180aa74a87a517d68c6970b4e46f9ec8fac5711..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/rag.mdx
+++ /dev/null
@@ -1,98 +0,0 @@
-# Retrieval augmented generation (RAG)
-
-:::info[Prerequisites]
-
-* [Retrieval](/docs/concepts/retrieval/)
-
-:::
-
-## Overview
-
-Retrieval Augmented Generation (RAG) is a powerful technique that enhances [language models](/docs/concepts/chat_models/) by combining them with external knowledge bases. 
-RAG addresses [a key limitation of models](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise): models rely on fixed training datasets, which can lead to outdated or incomplete information.
-When given a query, RAG systems first search a knowledge base for relevant information.
-The system then incorporates this retrieved information into the model's prompt.
-The model uses the provided context to generate a response to the query.
-By bridging the gap between vast language models and dynamic, targeted information retrieval, RAG is a powerful technique for building more capable and reliable AI systems.
-
-## Key concepts
-
-![Conceptual Overview](/img/rag_concepts.png)
-
-(1) **Retrieval system**: Retrieve relevant information from a knowledge base.
-
-(2) **Adding external knowledge**: Pass retrieved information to a model.
-
-## Retrieval system
-
-Models have internal knowledge that is often fixed, or at least not updated frequently due to the high cost of training.
-This limits their ability to answer questions about current events, or to provide specific domain knowledge.
-To address this, there are various knowledge injection techniques like [fine-tuning](https://hamel.dev/blog/posts/fine_tuning_valuable.html) or continued pre-training.
-Both are [costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise) and often [poorly suited](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) for factual retrieval.
-Using a retrieval system offers several advantages:
-
-- **Up-to-date information**: RAG can access and utilize the latest data, keeping responses current.
-- **Domain-specific expertise**: With domain-specific knowledge bases, RAG can provide answers in specific domains.
-- **Reduced hallucination**: Grounding responses in retrieved facts helps minimize false or invented information.
-- **Cost-effective knowledge integration**: RAG offers a more efficient alternative to expensive model fine-tuning.
-
-:::info[Further reading]
-
-See our conceptual guide on [retrieval](/docs/concepts/retrieval/).
-
-:::
-
-## Adding external knowledge
-
-With a retrieval system in place, we need to pass knowledge from this system to the model. 
-A RAG pipeline typically achieves this with the following steps:
-
-- Receive an input query.
-- Use the retrieval system to search for relevant information based on the query.
-- Incorporate the retrieved information into the prompt sent to the LLM.
-- Generate a response that leverages the retrieved context.
-
-As an example, here's a simple RAG workflow that passes information from a [retriever](/docs/concepts/retrievers/) to a [chat model](/docs/concepts/chat_models/):
-
-```python
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import SystemMessage, HumanMessage
-
-# Define a system prompt that tells the model how to use the retrieved context
-system_prompt = """You are an assistant for question-answering tasks. 
-Use the following pieces of retrieved context to answer the question. 
-If you don't know the answer, just say that you don't know. 
-Use three sentences maximum and keep the answer concise.
-Context: {context}"""
-    
-# Define a question
-question = """What are the main components of an LLM-powered autonomous agent system?"""
-
-# Retrieve relevant documents
-docs = retriever.invoke(question)
-
-# Combine the documents into a single string
-docs_text = "".join(d.page_content for d in docs)
-
-# Populate the system prompt with the retrieved context
-system_prompt_fmt = system_prompt.format(context=docs_text)
-
-# Create a model
-model = ChatOpenAI(model="gpt-4o", temperature=0) 
-
-# Generate a response
-response = model.invoke([SystemMessage(content=system_prompt_fmt),
-                         HumanMessage(content=question)])
-```
-
-:::info[Further reading]
-
-RAG is a deep area with many possible optimizations and design choices:
-
-* See [this excellent blog](https://cameronrwolfe.substack.com/p/a-practitioners-guide-to-retrieval?utm_source=profile&utm_medium=reader2) from Cameron Wolfe for a comprehensive overview and history of RAG.
-* See our [RAG how-to guides](/docs/how_to/#qa-with-rag).
-* See our RAG [tutorials](/docs/tutorials/).
-* See our RAG from Scratch course, with [code](https://github.com/langchain-ai/rag-from-scratch) and [video playlist](https://www.youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x).
-* Also, see our RAG from Scratch course [on Freecodecamp](https://youtu.be/sVcwVQRHIc8?feature=shared).
-
-:::
diff --git a/langchain_md_files/concepts/retrieval.mdx b/langchain_md_files/concepts/retrieval.mdx
deleted file mode 100644
index c3430fc7b231c1ef18502ca909208d7746508c03..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/retrieval.mdx
+++ /dev/null
@@ -1,242 +0,0 @@
-# Retrieval
-
-:::info[Prerequisites]
-
-* [Retrievers](/docs/concepts/retrievers/)
-* [Vector stores](/docs/concepts/vectorstores/)
-* [Embeddings](/docs/concepts/embedding_models/)
-* [Text splitters](/docs/concepts/text_splitters/)
-
-:::
-
-:::danger[Security]
- 
-Some of the concepts reviewed here utilize models to generate queries (e.g., for SQL or graph databases).
-There are inherent risks in doing this. 
-Make sure that your database connection permissions are scoped as narrowly as possible for your application's needs. 
-This will mitigate, though not eliminate, the risks of building a model-driven system capable of querying databases. 
-For more on general security best practices, see our [security guide](/docs/security/).
-
-:::
-
-## Overview 
-
-Retrieval systems are fundamental to many AI applications, efficiently identifying relevant information from large datasets. 
-These systems accommodate various data formats:
-
-- Unstructured text (e.g., documents) is often stored in vector stores or lexical search indexes.
-- Structured data is typically housed in relational or graph databases with defined schemas.
-
-Despite the growing diversity in data formats, modern AI applications increasingly aim to make all types of data accessible through natural language interfaces. 
-Models play a crucial role in this process by translating natural language queries into formats compatible with the underlying search index or database. 
-This translation enables more intuitive and flexible interactions with complex data structures.
-
-## Key concepts 
-
-![Retrieval](/img/retrieval_concept.png)
-
-(1) **Query analysis**: A process where models transform or construct search queries to optimize retrieval.
-
-(2) **Information retrieval**: Search queries are used to fetch information from various retrieval systems.
-
-## Query analysis 
-
-While users typically prefer to interact with retrieval systems using natural language, these systems may require specific query syntax or benefit from certain keywords. 
-Query analysis serves as a bridge between raw user input and optimized search queries. Some common applications of query analysis include:
-
-1. **Query Re-writing**: Queries can be re-written or expanded to improve semantic or lexical searches.
-2. **Query Construction**: Search indexes may require structured queries (e.g., SQL for databases).
-
-Query analysis employs models to transform or construct optimized search queries from raw user input. 
-
-### Query re-writing
-
-Retrieval systems should ideally handle a wide spectrum of user inputs, from simple and poorly worded queries to complex, multi-faceted questions. 
-To achieve this versatility, a popular approach is to use models to transform raw user queries into more effective search queries. 
-This transformation can range from simple keyword extraction to sophisticated query expansion and reformulation.
-Here are some key benefits of using models for query analysis in unstructured data retrieval:
-
-1. **Query Clarification**: Models can rephrase ambiguous or poorly worded queries for clarity.
-2. **Semantic Understanding**: They can capture the intent behind a query, going beyond literal keyword matching.
-3. **Query Expansion**: Models can generate related terms or concepts to broaden the search scope.
-4. **Complex Query Handling**: They can break down multi-part questions into simpler sub-queries.
-
-Various techniques have been developed to leverage models for query re-writing, including:
-
-| Name                                                                                                      | When to use                                                                                     | Description                                                                                                                                                                                                                                                                            |
-|-----------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Multi-query](/docs/how_to/MultiQueryRetriever/)                                                          | When you want to ensure high recall in retrieval by providing multiple phrasings of a question. | Rewrite the user question with multiple phrasings, retrieve documents for each rewritten question, return the unique documents for all queries.                                                                                                                                        |
-| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems.                                    | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer).                                                           |
-| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb)     | When a higher-level conceptual understanding is required.                                       | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. [Paper](https://arxiv.org/pdf/2310.06117).                                            |
-| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb)          | If you have challenges retrieving relevant documents using the raw user inputs.                 | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. [Paper](https://arxiv.org/abs/2212.10496). |
-
-As an example, query decomposition can simply be accomplished using prompting and a structured output that enforces a list of sub-questions.
-These can then be run sequentially or in parallel on a downstream retrieval system.
-
-```python
-from typing import List
-
-from pydantic import BaseModel, Field
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import SystemMessage, HumanMessage
-
-# Define a pydantic model to enforce the output structure
-class Questions(BaseModel):
-    questions: List[str] = Field(
-        description="A list of sub-questions related to the input query."
-    )
-
-# Create an instance of the model and enforce the output structure
-model = ChatOpenAI(model="gpt-4o", temperature=0) 
-structured_model = model.with_structured_output(Questions)
-
-# Define the system prompt
-system = """You are a helpful assistant that generates multiple sub-questions related to an input question. \n
-The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n"""
-
-# Pass the question to the model
-question = """What are the main components of an LLM-powered autonomous agent system?"""
-questions = structured_model.invoke([SystemMessage(content=system)]+[HumanMessage(content=question)])
-```
-
-:::tip
-
-See our RAG from Scratch videos for a few different specific approaches:
-- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
-- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
-- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
-- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)
-
-:::
-
-### Query construction
-
-Query analysis also can focus on translating natural language queries into specialized query languages or filters. 
-This translation is crucial for effectively interacting with various types of databases that house structured or semi-structured data.
-
-1. **Structured Data examples**: For relational and graph databases, Domain-Specific Languages (DSLs) are used to query data.
-   - **Text-to-SQL**: [Converts natural language to SQL](https://paperswithcode.com/task/text-to-sql) for relational databases.
-   - **Text-to-Cypher**: [Converts natural language to Cypher](https://neo4j.com/labs/neodash/2.4/user-guide/extensions/natural-language-queries/) for graph databases.
-
-2. **Semi-structured Data examples**: For vectorstores, queries can combine semantic search with metadata filtering.
-   - **Natural Language to Metadata Filters**: Converts user queries into [appropriate metadata filters](https://docs.pinecone.io/guides/data/filter-with-metadata).
-
-These approaches leverage models to bridge the gap between user intent and the specific query requirements of different data storage systems. Here are some popular techniques:
-
-| Name                                     | When to Use                                                                                                                          | Description                                                                                                                                                                                                                                          |
-|------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Self Query](/docs/how_to/self_query/)   | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
-| [Text to SQL](/docs/tutorials/sql_qa/)   | If users are asking questions that require information housed in a relational database, accessible via SQL.                          | This uses an LLM to transform user input into a SQL query.                                                                                                                                                                                           |
-| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher.                            | This uses an LLM to transform user input into a Cypher query.                                                                                                                                                                                        |
-
-As an example, here is how to use the `SelfQueryRetriever` to convert natural language queries into metadata filters.  
-
-```python
-from langchain.retrievers.self_query.base import SelfQueryRetriever
-from langchain_openai import ChatOpenAI
-
-# `schema_for_metadata` describes the metadata fields available for filtering
-# (e.g., a list of AttributeInfo objects); `vectorstore` is an existing vector store.
-metadata_field_info = schema_for_metadata
-document_content_description = "Brief summary of a movie"
-llm = ChatOpenAI(temperature=0)
-retriever = SelfQueryRetriever.from_llm(
-    llm,
-    vectorstore,
-    document_content_description,
-    metadata_field_info,
-)
-```
-
-:::info[Further reading]
-
-* See our tutorials on [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/rag/#query-analysis).
-* See our [blog post overview](https://blog.langchain.dev/query-construction/).
-* See our RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared).
-
-::: 
-
-## Information retrieval 
-
-### Common retrieval systems
-
-#### Lexical search indexes
-
-Many search engines are based upon matching words in a query to the words in each document. 
-This approach is called lexical retrieval, using search [algorithms that are typically based upon word frequencies](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
-The intuition is simple: if a word appears frequently both in the user’s query and in a particular document, then this document might be a good match.
-
-The particular data structure used to implement this is often an [*inverted index*](https://www.geeksforgeeks.org/inverted-index/).
-This type of index contains a list of words and a mapping of each word to a list of locations at which it occurs in various documents.
-Using this data structure, it is possible to efficiently match the words in search queries to the documents in which they appear.
-[BM25](https://en.wikipedia.org/wiki/Okapi_BM25#:~:text=BM25%20is%20a%20bag%2Dof,slightly%20different%20components%20and%20parameters.) and [TF-IDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) are [two popular lexical search algorithms](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
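-
-As a minimal sketch, the BM25 retriever integration (which requires the `rank_bm25` package) can build a lexical index directly from a handful of texts:
-
-```python
-from langchain_community.retrievers import BM25Retriever
-
-# Build a small lexical index over raw texts
-retriever = BM25Retriever.from_texts([
-    "BM25 scores documents using word frequency statistics.",
-    "Vector stores embed documents for semantic similarity search.",
-])
-
-docs = retriever.invoke("How does BM25 score documents?")
-```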
-
-:::info[Further reading]
-
-* See the [BM25](/docs/integrations/retrievers/bm25/) retriever integration.
-* See the [Elasticsearch](/docs/integrations/retrievers/elasticsearch_retriever/) retriever integration.
-
-::: 
-
-#### Vector indexes
-
-Vector indexes are an alternative way to index and store unstructured data.
-See our conceptual guide on [vectorstores](/docs/concepts/vectorstores/) for a detailed overview.  
-In short, rather than using word frequencies, vectorstores use an [embedding model](/docs/concepts/embedding_models/) to compress documents into high-dimensional vector representations.
-This allows for efficient similarity search over embedding vectors using simple mathematical operations like cosine similarity.
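-
-As a rough sketch (assuming an OpenAI embedding model is configured), documents can be embedded into an in-memory vector store and searched by similarity:
-
-```python
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import OpenAIEmbeddings
-
-# Embed a few texts and index them in memory
-vectorstore = InMemoryVectorStore.from_texts(
-    ["LangChain integrates with many vector stores."],
-    embedding=OpenAIEmbeddings(),
-)
-
-# Retrieve the most similar documents to the query
-docs = vectorstore.similarity_search("Which vector stores does LangChain integrate with?")
-```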
-
-:::info[Further reading]
-
-* See our [how-to guide](/docs/how_to/vectorstore_retriever/) for more details on working with vectorstores.
-* See our [list of vectorstore integrations](/docs/integrations/vectorstores/).
-* See Cameron Wolfe's [blog post](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2) on the basics of vector search.
-
-:::
-
-#### Relational databases
-
-Relational databases are a fundamental type of structured data storage used in many applications. 
-They organize data into tables with predefined schemas, where each table represents an entity or relationship. 
-Data is stored in rows (records) and columns (attributes), allowing for efficient querying and manipulation through SQL (Structured Query Language). 
-Relational databases excel at maintaining data integrity, supporting complex queries, and handling relationships between different data entities.
-
-:::info[Further reading]
-
-* See our [tutorial](/docs/tutorials/sql_qa/) for working with SQL databases.
-* See our [SQL database toolkit](/docs/integrations/tools/sql_database/).
-
-:::
-
-#### Graph databases
-
-Graph databases are a specialized type of database designed to store and manage highly interconnected data. 
-Unlike traditional relational databases, graph databases use a flexible structure consisting of nodes (entities), edges (relationships), and properties. 
-This structure allows for efficient representation and querying of complex, interconnected data.
-They are particularly useful for storing and querying complex relationships between data points, such as social networks, supply-chain management, fraud detection, and recommendation services.
-
-:::info[Further reading]
-
-* See our [tutorial](/docs/tutorials/graph/) for working with graph databases.
-* See our [list of graph database integrations](/docs/integrations/graphs/). 
-* See Neo4j's [starter kit for LangChain](https://neo4j.com/developer-blog/langchain-neo4j-starter-kit/).
-
-:::
-
-### Retriever  
-
-LangChain provides a unified interface for interacting with various retrieval systems through the [retriever](/docs/concepts/retrievers/) concept. The interface is straightforward:
-
-1. Input: A query (string)
-2. Output: A list of documents (standardized LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects)
-
-You can create a retriever using any of the retrieval systems mentioned earlier. The query analysis techniques we discussed are particularly useful here, as they enable natural language interfaces for databases that typically require structured query languages.
-For example, you can build a retriever for a SQL database using text-to-SQL conversion. This allows a natural language query (string) to be transformed into a SQL query behind the scenes.
-Regardless of the underlying retrieval system, all retrievers in LangChain share a common interface. You can use them with the simple `invoke` method:
-
-
-```python
-docs = retriever.invoke(query)
-```
-
-:::info[Further reading]
-
-* See our [conceptual guide on retrievers](/docs/concepts/retrievers/).
-* See our [how-to guide](/docs/how_to/#retrievers) on working with retrievers.
-
-:::
diff --git a/langchain_md_files/concepts/retrievers.mdx b/langchain_md_files/concepts/retrievers.mdx
deleted file mode 100644
index baa3fc7c174e14e4f88f6890086f64ccb4ed1a0f..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/retrievers.mdx
+++ /dev/null
@@ -1,145 +0,0 @@
-# Retrievers
-
-<span data-heading-keywords="retriever,retrievers"></span>
-
-:::info[Prerequisites]
-
-* [Vector stores](/docs/concepts/vectorstores/)
-* [Embeddings](/docs/concepts/embedding_models/)
-* [Text splitters](/docs/concepts/text_splitters/)
-
-:::
-
-## Overview
-
-Many different types of retrieval systems exist, including vectorstores, graph databases, and relational databases.
-With the rise in popularity of large language models, retrieval systems have become an important component of AI applications (e.g., [RAG](/docs/concepts/rag/)).
-Because of their importance and variability, LangChain provides a uniform interface for interacting with different types of retrieval systems.
-The LangChain [retriever](/docs/concepts/retrievers/) interface is straightforward:
-
-1. Input: A query (string)
-2. Output: A list of documents (standardized LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects)
-
-## Key concept
-
-![Retriever](/img/retriever_concept.png)
- 
-All retrievers implement a simple interface for retrieving documents using natural language queries.
-
-## Interface 
-
-The only requirement for a retriever is the ability to accept a query and return documents.
-In particular, [LangChain's retriever class](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html#) only requires that the `_get_relevant_documents` method is implemented, which takes a `query: str` and returns a list of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects that are most relevant to the query.
-The underlying logic used to get relevant documents is specified by the retriever and can be whatever is most useful for the application.
-
-A LangChain retriever is a [runnable](/docs/how_to/lcel_cheatsheet/), which is a standard interface for LangChain components.
-This means that it has a few common methods, including `invoke`, that are used to interact with it. A retriever can be invoked with a query:
-
-```python
-docs = retriever.invoke(query)
-```
-
-Retrievers return a list of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which have two attributes:
-
-* `page_content`: The content of this document. Currently is a string.
-* `metadata`: Arbitrary metadata associated with this document (e.g., document id, file name, source, etc). 
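-
-For example, the retrieved documents can be inspected like this (the metadata values shown are illustrative):
-
-```python
-docs = retriever.invoke("What is task decomposition?")
-for doc in docs:
-    print(doc.page_content)  # the text of the document
-    print(doc.metadata)      # e.g., {"source": "agents_paper.pdf", "page": 3}
-```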
-
-:::info[Further reading]
-
-* See our [how-to guide](/docs/how_to/custom_retriever/) on building your own custom retriever.
-
-:::
- 
-## Common types
-
-Despite the flexibility of the retriever interface, a few common types of retrieval systems are frequently used.
-
-### Search APIs
-
-It's important to note that retrievers don't need to actually *store* documents. 
-For example, we can build retrievers on top of search APIs that simply return search results!
-See our retriever integrations with [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/) or [Wikipedia Search](/docs/integrations/retrievers/wikipedia/). 
-
-### Relational or graph database
-
-Retrievers can be built on top of relational or graph databases.
-In these cases, [query analysis](/docs/concepts/retrieval/) techniques that construct a structured query from natural language are critical.
-For example, you can build a retriever for a SQL database using text-to-SQL conversion. This allows a natural language query (string) to be transformed into a SQL query behind the scenes.
-
-:::info[Further reading]
-
-* See our [tutorial](/docs/tutorials/sql_qa/) for context on how to build a retriever using a SQL database and text-to-SQL.
-* See our [tutorial](/docs/tutorials/graph/) for context on how to build a retriever using a graph database and text-to-Cypher.
-
-:::
-
-### Lexical search
-
-As discussed in our conceptual review of [retrieval](/docs/concepts/retrieval/), many search engines are based upon matching words in a query to the words in each document. 
-[BM25](https://en.wikipedia.org/wiki/Okapi_BM25#:~:text=BM25%20is%20a%20bag%2Dof,slightly%20different%20components%20and%20parameters.) and [TF-IDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) are [two popular lexical search algorithms](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
-LangChain has retrievers for many popular lexical search algorithms / engines.
-
-:::info[Further reading]
-
-* See the [BM25](/docs/integrations/retrievers/bm25/) retriever integration.
-* See the [TF-IDF](/docs/integrations/retrievers/tf_idf/) retriever integration.
-* See the [Elasticsearch](/docs/integrations/retrievers/elasticsearch_retriever/) retriever integration.
-
-::: 
-
-### Vector store 
-
-[Vector stores](/docs/concepts/vectorstores/) are a powerful and efficient way to index and retrieve unstructured data. 
-A vectorstore can be used as a retriever by calling the `as_retriever()` method.
-
-```python
-vectorstore = MyVectorStore()
-retriever = vectorstore.as_retriever()
-```
-
-## Advanced retrieval patterns
-
-### Ensemble 
-
-Because the retriever interface is so simple, returning a list of `Document` objects given a search query, it is possible to combine multiple retrievers using ensembling.
-This is particularly useful when you have multiple retrievers that are good at finding different types of relevant documents.
-It is easy to create an [ensemble retriever](/docs/how_to/ensemble_retriever/) that combines multiple retrievers with linear weighted scores:
-
-```python
-# Initialize the ensemble retriever
-ensemble_retriever = EnsembleRetriever(
-    retrievers=[bm25_retriever, vector_store_retriever], weights=[0.5, 0.5]
-)
-```
-
-When ensembling, how do we combine search results from many retrievers? 
-This motivates the concept of re-ranking, which takes the output of multiple retrievers and combines them using a more sophisticated algorithm such as [Reciprocal Rank Fusion (RRF)](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
-
-### Source document retention 
-
-Many retrievers utilize some kind of index to make documents easily searchable.
-The process of indexing can include a transformation step (e.g., vectorstores often use document splitting). 
-Whatever transformation is used, it can be very useful to retain a link between the *transformed document* and the original, giving the retriever the ability to return the *original* document.
-
-![Retrieval with full docs](/img/retriever_full_docs.png)
-
-This is particularly useful in AI applications, because it ensures no loss in document context for the model.
-For example, you may use a small chunk size when indexing documents in a vectorstore.
-If you return *only* the chunks as the retrieval result, then the model will have lost the original document context for the chunks. 
-
-LangChain has two different retrievers that can be used to address this challenge. 
-The [Multi-Vector](/docs/how_to/multi_vector/) retriever allows the user to use any document transformation (e.g., use an LLM to write a summary of the document) for indexing while retaining linkage to the source document. 
-The [ParentDocument](/docs/how_to/parent_document_retriever/) retriever links document chunks from a text-splitter transformation for indexing while retaining linkage to the source document. 
-
-| Name                                                      | Index Type                    | Uses an LLM               | When to Use                                                                                                                             | Description                                                                                                                                                                                                              |
-|-----------------------------------------------------------|-------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No                        | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
-| [Multi Vector](/docs/how_to/multi_vector/)                | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself.                    | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions.                                         |
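-
-As a minimal sketch of the ParentDocument pattern (assuming `vectorstore` is an existing vector store and `docs` is a list of Documents):
-
-```python
-from langchain.retrievers import ParentDocumentRetriever
-from langchain.storage import InMemoryStore
-from langchain_text_splitters import RecursiveCharacterTextSplitter
-
-retriever = ParentDocumentRetriever(
-    vectorstore=vectorstore,  # indexes the small child chunks
-    docstore=InMemoryStore(),  # stores the full parent documents
-    child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
-)
-retriever.add_documents(docs)
-
-# Similarity search runs over the chunks, but the full parent documents are returned
-results = retriever.invoke("memory in agents")
-```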
-
-:::info[Further reading]
-
-* See our [how-to guide](/docs/how_to/parent_document_retriever/) on using the ParentDocument retriever.
-* See our [how-to guide](/docs/how_to/multi_vector/) on using the MultiVector retriever.
-* See our RAG from Scratch video on the [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared).
-
-:::
diff --git a/langchain_md_files/concepts/runnables.mdx b/langchain_md_files/concepts/runnables.mdx
deleted file mode 100644
index 0ba5e0027f3f08785c0e36e8f049dc5a03c3a66a..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/runnables.mdx
+++ /dev/null
@@ -1,352 +0,0 @@
-# Runnable interface
-
-The Runnable interface is the foundation for working with LangChain components, and it's implemented across many of them, such as [language models](/docs/concepts/chat_models), [output parsers](/docs/concepts/output_parsers), [retrievers](/docs/concepts/retrievers), [compiled LangGraph graphs](
-https://langchain-ai.github.io/langgraph/concepts/low_level/#compiling-your-graph) and more.
-
-This guide covers the main concepts and methods of the Runnable interface, which allows developers to interact with various LangChain components in a consistent and predictable manner.
-
-:::info Related Resources
-* The ["Runnable" Interface API Reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) provides a detailed overview of the Runnable interface and its methods.
-* A list of built-in `Runnables` can be found in the [LangChain Core API Reference](https://python.langchain.com/api_reference/core/runnables.html). Many of these Runnables are useful when composing custom "chains" in LangChain using the [LangChain Expression Language (LCEL)](/docs/concepts/lcel).
-:::
-
-## Overview of runnable interface
-
-The Runnable interface defines a standard set of methods that allows a Runnable component to be:
-
-* [Invoked](/docs/how_to/lcel_cheatsheet/#invoke-a-runnable): A single input is transformed into an output.
-* [Batched](/docs/how_to/lcel_cheatsheet/#batch-a-runnable): Multiple inputs are efficiently transformed into outputs.
-* [Streamed](/docs/how_to/lcel_cheatsheet/#stream-a-runnable): Outputs are streamed as they are produced.
-* Inspected: Schematic information about Runnable's input, output, and configuration can be accessed.
-* Composed: Multiple Runnables can be composed to work together using [the LangChain Expression Language (LCEL)](/docs/concepts/lcel) to create complex pipelines.
-
-Please review the [LCEL Cheatsheet](/docs/how_to/lcel_cheatsheet) for some common patterns that involve the Runnable interface and LCEL expressions.
-
-<a id="batch"></a>
-### Optimized parallel execution (batch)
-<span data-heading-keywords="batch"></span>
-
-LangChain Runnables offer built-in `batch` (and `batch_as_completed`) APIs that allow you to process multiple inputs in parallel.
-
-Using these methods can significantly improve performance when needing to process multiple independent inputs, as the
-processing can be done in parallel instead of sequentially.
-
-The two batching options are:
-
-* `batch`: Process multiple inputs in parallel, returning results in the same order as the inputs.
-* `batch_as_completed`: Process multiple inputs in parallel, returning results as they complete. Results may arrive out of order, but each includes the input index for matching.
-
-The default implementations of `batch` and `batch_as_completed` use a thread pool executor to run the `invoke` method in parallel. This allows for efficient parallel execution without the need for users to manage threads, and speeds up code that is I/O-bound (e.g., making API requests, reading files, etc.). It will not be as effective for CPU-bound operations, as the GIL (Global Interpreter Lock) in Python will prevent true parallel execution.
-
-Some Runnables may provide their own implementations of `batch` and `batch_as_completed` that are optimized for their specific use case (e.g.,
-rely on a `batch` API provided by a model provider).
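-
-As a minimal sketch with a toy Runnable:
-
-```python
-from langchain_core.runnables import RunnableLambda
-
-runnable = RunnableLambda(lambda x: x * 2)
-
-# Results are returned in the same order as the inputs
-runnable.batch([1, 2, 3])  # -> [2, 4, 6]
-
-# Results are yielded as (index, output) pairs as each call completes
-for idx, result in runnable.batch_as_completed([1, 2, 3]):
-    print(idx, result)
-```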
-
-:::note
-The async `abatch` and `abatch_as_completed` methods rely on asyncio's [gather](https://docs.python.org/3/library/asyncio-task.html#asyncio.gather) and [as_completed](https://docs.python.org/3/library/asyncio-task.html#asyncio.as_completed) functions to run the `ainvoke` method in parallel.
-:::
-
-:::tip
-When processing a large number of inputs using `batch` or `batch_as_completed`, users may want to control the maximum number of parallel calls. This can be done by setting the `max_concurrency` attribute in the `RunnableConfig` dictionary. See the [RunnableConfig](/docs/concepts/runnables/#runnableconfig) for more information.
-
-Chat Models also have a built-in [rate limiter](/docs/concepts/chat_models#rate-limiting) that can be used to control the rate at which requests are made.
-:::
-
-### Asynchronous support
-<span data-heading-keywords="async-api"></span>
-
-Runnables expose an asynchronous API, allowing them to be called using the `await` syntax in Python. Asynchronous methods can be identified by the "a" prefix (e.g., `ainvoke`, `abatch`, `astream`, `abatch_as_completed`).
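-
-For example, a minimal sketch of the async API with a toy Runnable:
-
-```python
-import asyncio
-
-from langchain_core.runnables import RunnableLambda
-
-runnable = RunnableLambda(lambda x: x + 1)
-
-async def main():
-    print(await runnable.ainvoke(1))         # 2
-    print(await runnable.abatch([1, 2, 3]))  # [2, 3, 4]
-
-asyncio.run(main())
-```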
-
-Please refer to the [Async Programming with LangChain](/docs/concepts/async) guide for more details.
-
-## Streaming APIs
-<span data-heading-keywords="streaming-api"></span>
-
-Streaming is critical in making applications based on LLMs feel responsive to end-users.
-
-Runnables expose the following three streaming APIs:
-
-1. sync [stream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) and async [astream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream): yields the output of a Runnable as it is generated.
-2. The async `astream_events`: a more advanced streaming API that allows streaming intermediate steps and final output.
-3. The **legacy** async `astream_log`: a legacy streaming API that streams intermediate steps and final output.
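-
-For example, a minimal sketch of `stream` (assuming `model` is a chat model such as `ChatOpenAI`):
-
-```python
-# Chunks are printed as soon as the model produces them
-for chunk in model.stream("Write a haiku about streaming"):
-    print(chunk.content, end="", flush=True)
-```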
-
-Please refer to the [Streaming Conceptual Guide](/docs/concepts/streaming) for more details on how to stream in LangChain.
-
-## Input and output types
-
-Every `Runnable` is characterized by an input and output type. These input and output types can be any Python object, and are defined by the Runnable itself.
-
-Runnable methods that result in the execution of the Runnable (e.g., `invoke`, `batch`, `stream`, `astream_events`) work with these input and output types.
-
-* invoke: Accepts an input and returns an output.
-* batch: Accepts a list of inputs and returns a list of outputs.
-* stream: Accepts an input and returns a generator that yields outputs.
-
-The **input type** and **output type** vary by component:
-
-| Component    | Input Type                                       | Output Type           |
-|--------------|--------------------------------------------------|-----------------------|
-| Prompt       | dictionary                                       | PromptValue           |
-| ChatModel    | a string, list of chat messages or a PromptValue | ChatMessage           |
-| LLM          | a string, list of chat messages or a PromptValue | String                |
-| OutputParser | the output of an LLM or ChatModel                | Depends on the parser |
-| Retriever    | a string                                         | List of Documents     |
-| Tool         | a string or dictionary, depending on the tool    | Depends on the tool   |
-
-Please refer to the individual component documentation for more information on the input and output types and how to use them.
-
-### Inspecting schemas
-
-:::note
-This is an advanced feature that is unnecessary for most users. You should probably
-skip this section unless you have a specific need to inspect the schema of a Runnable.
-:::
-
-In more advanced use cases, you may want to programmatically **inspect** the Runnable and determine what input and output types the Runnable expects and produces.
-
-The Runnable interface provides methods to get the [JSON Schema](https://json-schema.org/) of the input and output types of a Runnable, as well as [Pydantic schemas](https://docs.pydantic.dev/latest/) for the input and output types.
-
-These APIs are mostly used internally for unit-testing and by [LangServe](/docs/concepts/architecture#langserve) which uses the APIs for input validation and generation of [OpenAPI documentation](https://www.openapis.org/).
-
-In addition to the input and output types, some Runnables have been set up with additional runtime configuration options.
-There are corresponding APIs to get the Pydantic Schema and JSON Schema of the configuration options for the Runnable.
-Please see the [Configurable Runnables](#configurable-runnables) section for more information.
-
-| Method                  | Description                                                      |
-|-------------------------|------------------------------------------------------------------|
-| `get_input_schema`      | Gives the Pydantic Schema of the input schema for the Runnable.  |
-| `get_output_schema`      | Gives the Pydantic Schema of the output schema for the Runnable. |
-| `config_schema`         | Gives the Pydantic Schema of the config schema for the Runnable. |
-| `get_input_jsonschema`  | Gives the JSONSchema of the input schema for the Runnable.       |
-| `get_output_jsonschema` | Gives the JSONSchema of the output schema for the Runnable.      |
-| `get_config_jsonschema` | Gives the JSONSchema of the config schema for the Runnable.      |
-
-
-#### With_types
-
-LangChain will automatically try to infer the input and output types of a Runnable based on available information.
-
-Currently, this inference does not work well for more complex Runnables that are built using [LCEL](/docs/concepts/lcel) composition, and the inferred input and / or output types may be incorrect. In these cases, we recommend that users override the inferred input and output types using the `with_types` method ([API Reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_types)).
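-
-A minimal sketch that combines `with_types` with the schema inspection methods listed above:
-
-```python
-from langchain_core.runnables import RunnableLambda
-
-runnable = RunnableLambda(lambda x: x + 1).with_types(input_type=int, output_type=int)
-
-print(runnable.get_input_jsonschema())   # JSON Schema describing an integer input
-print(runnable.get_output_jsonschema())  # JSON Schema describing an integer output
-```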
-
-## RunnableConfig
-
-Any of the methods that are used to execute the runnable (e.g., `invoke`, `batch`, `stream`, `astream_events`) accept a second argument called
-`RunnableConfig` ([API Reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html#RunnableConfig)). This argument is a dictionary that contains configuration for the Runnable that will be used
-at run time during the execution of the runnable.
-
-A `RunnableConfig` can have any of the following properties defined:
-
-| Attribute       | Description                                                                                |
-|-----------------|--------------------------------------------------------------------------------------------|
-| run_name        | Name used for the given Runnable (not inherited).                                          |
-| run_id          | Unique identifier for this call. Sub-calls will get their own unique run ids.              |
-| tags            | Tags for this call and any sub-calls.                                                      |
-| metadata        | Metadata for this call and any sub-calls.                                                  |
-| callbacks       | Callbacks for this call and any sub-calls.                                                 |
-| max_concurrency | Maximum number of parallel calls to make (e.g., used by batch).                            |
-| recursion_limit | Maximum number of times a call can recurse (e.g., used by Runnables that return Runnables) |
-| configurable    | Runtime values for configurable attributes of the Runnable.                                |
-
-Passing `config` to the `invoke` method is done like so:
-
-```python
-some_runnable.invoke(
-   some_input, 
-   config={
-      'run_name': 'my_run', 
-      'tags': ['tag1', 'tag2'], 
-      'metadata': {'key': 'value'}
-   }
-)
-```
-
-### Propagation of RunnableConfig
-
-Many `Runnables` are composed of other Runnables, and it is important that the `RunnableConfig` is propagated to all sub-calls made by the Runnable. This allows providing run time configuration values to the parent Runnable that are inherited by all sub-calls.
-
-If this were not the case, it would be impossible to set and propagate [callbacks](/docs/concepts/callbacks) or other configuration values like `tags` and `metadata` which
-are expected to be inherited by all sub-calls.
-
-There are two main patterns by which new `Runnables` are created:
-
-1. Declaratively using [LangChain Expression Language (LCEL)](/docs/concepts/lcel):
-
-    ```python
-    chain = prompt | chat_model | output_parser
-    ```
-
-2. Using a [custom Runnable](#custom-runnables)  (e.g., `RunnableLambda`) or using the `@tool` decorator:
-
-    ```python
-    def foo(input):
-        # Note that .invoke() is used directly here
-        return bar_runnable.invoke(input)
-    foo_runnable = RunnableLambda(foo)
-    ```
-
-LangChain will try to propagate `RunnableConfig` automatically for both of the patterns. 
-
-For handling the second pattern, LangChain relies on Python's [contextvars](https://docs.python.org/3/library/contextvars.html).
-
-In Python 3.11 and above, this works out of the box, and you do not need to do anything special to propagate the `RunnableConfig` to the sub-calls.
-
-In Python 3.9 and 3.10, if you are using **async code**, you need to manually pass the `RunnableConfig` through to the `Runnable` when invoking it. 
-
-This is due to a limitation in [asyncio's tasks](https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task) in Python 3.9 and 3.10, which did
-not accept a `context` argument.
-
-Propagating the `RunnableConfig` manually is done like so:
-
-```python
-async def foo(input, config): # <-- Note the config argument
-    return await bar_runnable.ainvoke(input, config=config)
-    
-foo_runnable = RunnableLambda(foo)
-```
-
-:::caution
-When using Python 3.10 or lower and writing async code, `RunnableConfig` cannot be propagated
-automatically, and you will need to do it manually! This is a common pitfall when
-attempting to stream data using `astream_events` and `astream_log` as these methods
-rely on proper propagation of [callbacks](/docs/concepts/callbacks) defined inside of `RunnableConfig`.
-:::
-
-### Setting custom run name, tags, and metadata
-
-The `run_name`, `tags`, and `metadata` attributes of the `RunnableConfig` dictionary can be used to set custom values for the run name, tags, and metadata for a given Runnable.
-
-The `run_name` is a string that can be used to set a custom name for the run. This name will be used in logs and other places to identify the run. It is not inherited by sub-calls.
-
-The `tags` and `metadata` attributes are lists and dictionaries, respectively, that can be used to set custom tags and metadata for the run. These values are inherited by sub-calls.
-
-Using these attributes can be useful for tracking and debugging runs, as they will be surfaced in [LangSmith](https://docs.smith.langchain.com/) as trace attributes that you can
-filter and search on.
-
-The attributes will also be propagated to [callbacks](/docs/concepts/callbacks), and will appear in streaming APIs like [astream_events](/docs/concepts/streaming) as part of each event in the stream.
-
-:::note Related
-* [How-to trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
-:::
-
-### Setting run id
-
-:::note
-This is an advanced feature that is unnecessary for most users.
-:::
-
-You may need to set a custom `run_id` for a given run, in case you want 
-to reference it later or correlate it with other systems.
-
-The `run_id` MUST be a valid UUID string and **unique** for each run. It is used to identify
-the parent run; sub-calls will get their own unique run IDs automatically.
-
-To set a custom `run_id`, you can pass it as a key-value pair in the `config` dictionary when invoking the Runnable:
-
-```python
-import uuid
-
-run_id = uuid.uuid4()
-
-some_runnable.invoke(
-   some_input, 
-   config={
-      'run_id': run_id
-   }
-)
-
-# Do something with the run_id
-```
-
-### Setting recursion limit
-
-:::note
-This is an advanced feature that is unnecessary for most users.
-:::
-
-Some Runnables may return other Runnables, which can lead to infinite recursion if not handled properly. To prevent this, you can set a `recursion_limit` in the `RunnableConfig` dictionary. This will limit the number of times a Runnable can recurse.
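-
-For example, a minimal sketch (where `some_runnable` stands in for any Runnable that recurses):
-
-```python
-some_runnable.invoke(
-   some_input,
-   config={
-      'recursion_limit': 10  # raise an error if recursion goes deeper than 10 levels
-   }
-)
-```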
-
-### Setting max concurrency
-
-If using the `batch` or `batch_as_completed` methods, you can set the `max_concurrency` attribute in the `RunnableConfig` dictionary to control the maximum number of parallel calls to make. This can be useful when you want to limit the number of parallel calls to prevent overloading a server or API.
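-
-For example, a minimal sketch (assuming `some_runnable` and a list of inputs):
-
-```python
-some_runnable.batch(
-   [input_1, input_2, input_3],
-   config={
-      'max_concurrency': 2  # process at most two inputs in parallel
-   }
-)
-```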
-
-
-:::tip
-If you're trying to rate limit the number of requests made by a **Chat Model**, you can use the built-in [rate limiter](/docs/concepts/chat_models#rate-limiting) instead of setting `max_concurrency`, which will be more effective.
-
-See the [How to handle rate limits](/docs/how_to/chat_model_rate_limiting/) guide for more information.
-:::
-
-### Setting configurable
-
-The `configurable` field is used to pass runtime values for configurable attributes of the Runnable.
-
-It is used frequently in [LangGraph](/docs/concepts/architecture#langgraph) with
-[LangGraph Persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/)
-and [memory](https://langchain-ai.github.io/langgraph/concepts/memory/).
-
-It is used for a similar purpose in [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) to specify
-a `session_id` or `conversation_id` to keep track of conversation history.
-
-In addition, you can use it to pass any custom configuration options to any [Configurable Runnable](#configurable-runnables) that you create.
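-
-For example, here is a rough sketch of passing a session identifier at runtime to a chain wrapped in `RunnableWithMessageHistory` (`chain_with_history` and the `session_id` key are placeholders for whatever your chain was configured with):
-
-```python
-chain_with_history.invoke(
-   {"input": "What did I just ask you?"},
-   config={
-      "configurable": {"session_id": "user-123"}
-   }
-)
-```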
-
-### Setting callbacks
-
-Use this option to configure [callbacks](/docs/concepts/callbacks) for the runnable at 
-runtime. The callbacks will be passed to all sub-calls made by the runnable.
-
-```python
-some_runnable.invoke(
-   some_input,
-   {
-      "callbacks": [
-         SomeCallbackHandler(),
-         AnotherCallbackHandler(),
-      ]
-   }
-)
-```
-
-Please read the [Callbacks Conceptual Guide](/docs/concepts/callbacks) for more information on how to use callbacks in LangChain.
-
-:::important
-If you're using Python 3.9 or 3.10 in an async environment, you must propagate
-the `RunnableConfig` manually to sub-calls in some cases. Please see the
-[Propagating RunnableConfig](#propagation-of-runnableconfig) section for more information.
-:::
-
-## Creating a runnable from a function {#custom-runnables}
-
-You may need to create a custom Runnable that runs arbitrary logic. This is especially
-useful if using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to compose
-multiple Runnables and you need to add custom processing logic in one of the steps.
-
-There are two ways to create a custom Runnable from a function:
-
-* `RunnableLambda`: Use this for simple transformations where streaming is not required.
-* `RunnableGenerator`: Use this for more complex transformations when streaming is needed.
-
-See the [How to run custom functions](/docs/how_to/functions) guide for more information on how to use `RunnableLambda` and `RunnableGenerator`.
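-
-As a rough sketch of the difference:
-
-```python
-from typing import Iterator
-
-from langchain_core.runnables import RunnableGenerator, RunnableLambda
-
-# RunnableLambda: a simple one-shot transformation
-shout = RunnableLambda(lambda text: text.upper())
-shout.invoke("hello")  # -> "HELLO"
-
-# RunnableGenerator: consumes and yields chunks, preserving streaming
-def _upper_chunks(chunks: Iterator[str]) -> Iterator[str]:
-    for chunk in chunks:
-        yield chunk.upper()
-
-shout_streaming = RunnableGenerator(_upper_chunks)
-```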
-
-:::important
-Users should not try to subclass Runnables to create a new custom Runnable. It is
-much more complex and error-prone than simply using `RunnableLambda` or `RunnableGenerator`.
-:::
-
-## Configurable runnables
-
-:::note
-This is an advanced feature that is unnecessary for most users.
-
-It helps with configuration of large "chains" created using the [LangChain Expression Language (LCEL)](/docs/concepts/lcel)
-and is leveraged by [LangServe](/docs/concepts/architecture#langserve) for deployed Runnables.
-:::
-
-Sometimes you may want to experiment with, or even expose to the end user, multiple different ways of doing things with your Runnable. This could involve adjusting parameters like the temperature in a chat model or even switching between different chat models.
-
-To simplify this process, the Runnable interface provides two methods for creating configurable Runnables at runtime:
-
-* `configurable_fields`: This method allows you to configure specific **attributes** in a Runnable. For example, the `temperature` attribute of a chat model.
-* `configurable_alternatives`: This method enables you to specify **alternative** Runnables that can be run during runtime. For example, you could specify a list of different chat models that can be used.
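-
-For example, here is a rough sketch of making a chat model's `temperature` configurable at runtime (assuming a `model` object that exposes a `temperature` attribute):
-
-```python
-from langchain_core.runnables import ConfigurableField
-
-configurable_model = model.configurable_fields(
-    temperature=ConfigurableField(
-        id="llm_temperature",
-        name="LLM Temperature",
-        description="Sampling temperature for the chat model",
-    )
-)
-
-# Override the temperature for this call only
-configurable_model.invoke(
-    "Tell me a joke",
-    config={"configurable": {"llm_temperature": 0.9}},
-)
-```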
-
-See the [How to configure runtime chain internals](/docs/how_to/configure) guide for more information on how to configure runtime chain internals.
diff --git a/langchain_md_files/concepts/streaming.mdx b/langchain_md_files/concepts/streaming.mdx
deleted file mode 100644
index c2dc400e23e2033e441b444937f13a09a385dd23..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/streaming.mdx
+++ /dev/null
@@ -1,191 +0,0 @@
-# Streaming
-
-:::info Prerequisites
-* [Runnable Interface](/docs/concepts/runnables)
-* [Chat Models](/docs/concepts/chat_models)
-:::
-
-**Streaming** is crucial for enhancing the responsiveness of applications built on [LLMs](/docs/concepts/chat_models). By displaying output progressively, even before a complete response is ready, streaming significantly improves user experience (UX), particularly when dealing with the latency of LLMs.
-
-## Overview
-
-Generating full responses from [LLMs](/docs/concepts/chat_models) often incurs a delay of several seconds, which becomes more noticeable in complex applications with multiple model calls. Fortunately, LLMs generate responses iteratively, allowing for intermediate results to be displayed as they are produced. By streaming these intermediate outputs, LangChain enables smoother UX in LLM-powered apps and offers built-in support for streaming at the core of its design.
-
-In this guide, we'll discuss streaming in LLM applications and explore how LangChain's streaming APIs facilitate real-time output from various components in your application.
-
-## What to stream in LLM applications
-
-In applications involving LLMs, several types of data can be streamed to improve user experience by reducing perceived latency and increasing transparency. These include:
-
-### 1. Streaming LLM outputs
-
-The most common and critical data to stream is the output generated by the LLM itself. LLMs often take time to generate full responses, and by streaming the output in real-time, users can see partial results as they are produced. This provides immediate feedback and helps reduce the wait time for users.
-
-### 2. Streaming pipeline or workflow progress
-
-Beyond just streaming LLM output, it’s useful to stream progress through more complex workflows or pipelines, giving users a sense of how the application is progressing overall. This could include:
-
-- **In LangGraph Workflows:**
-With [LangGraph](/docs/concepts/architecture#langgraph), workflows are composed of nodes and edges that represent various steps. Streaming here involves tracking changes to the **graph state** as individual **nodes** request updates. This allows for more granular monitoring of which node in the workflow is currently active, giving real-time updates about the status of the workflow as it progresses through different stages.
-
-- **In LCEL Pipelines:**
-Streaming updates from an [LCEL](/docs/concepts/lcel) pipeline involves capturing progress from individual **sub-runnables**. For example, as different steps or components of the pipeline execute, you can stream which sub-runnable is currently running, providing real-time insight into the overall pipeline's progress.
-
-Streaming pipeline or workflow progress is essential in providing users with a clear picture of where the application is in the execution process.
-
-### 3. Streaming custom data
-
-In some cases, you may need to stream **custom data** that goes beyond the information provided by the pipeline or workflow structure. This custom information is injected within a specific step in the workflow, whether that step is a tool or a LangGraph node. For example, you could stream updates about what a tool is doing in real-time or the progress through a LangGraph node. This granular data, which is emitted directly from within the step, provides more detailed insights into the execution of the workflow and is especially useful in complex processes where more visibility is needed.
-
-## Streaming APIs
-
-LangChain has two main APIs for streaming output in real-time. These APIs are supported by any component that implements the [Runnable Interface](/docs/concepts/runnables), including [LLMs](/docs/concepts/chat_models), [compiled LangGraph graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/), and any Runnable generated with [LCEL](/docs/concepts/lcel).
-
-1. sync [stream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) and async [astream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream): Use these to stream outputs from individual Runnables (e.g., a chat model) as they are generated, or to stream any workflow created with LangGraph.
-2. The async-only [astream_events](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events): Use this API to get access to custom events and intermediate outputs from LLM applications built entirely with [LCEL](/docs/concepts/lcel). Note that this API is available, but not needed when working with LangGraph.
-
-:::note
-In addition, there is a **legacy** async [astream_log](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_log) API. This API is not recommended for new projects, as it is more complex and less feature-rich than the other streaming APIs.
-:::
-
-### `stream()` and `astream()`
-
-The `stream()` method returns an iterator that yields chunks of output synchronously as they are produced. You can use a `for` loop to process each chunk in real-time. For example, when using an LLM, this allows the output to be streamed incrementally as it is generated, reducing the wait time for users.
-
-The type of chunk yielded by the `stream()` and `astream()` methods depends on the component being streamed. For example, when streaming from an [LLM](/docs/concepts/chat_models), each chunk will be an [AIMessageChunk](/docs/concepts/messages#aimessagechunk); however, for other components, the chunk may be different.
-
-For example:
-
-```python
-for chunk in component.stream(some_input):
-    # IMPORTANT: Keep the processing of each chunk as efficient as possible.
-    # While you're processing the current chunk, the upstream component is
-    # waiting to produce the next one. For example, if working with LangGraph,
-    # graph execution is paused while the current chunk is being processed.
-    # In extreme cases, this could even result in timeouts (e.g., when llm outputs are
-    # streamed from an API that has a timeout).
-    print(chunk)
-```
-
-The [asynchronous version](/docs/concepts/async), `astream()`, works similarly but is designed for non-blocking workflows. You can use it in asynchronous code to achieve the same real-time streaming behavior.
-
-#### Usage with chat models
-
-When using `stream()` or `astream()` with chat models, the output is streamed as [AIMessageChunks](/docs/concepts/messages#aimessagechunk) as it is generated by the LLM. This allows you to present or process the LLM's output incrementally as it's being produced, which is particularly useful in interactive applications or interfaces.
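-
-For example, a minimal sketch (assuming `model` is any chat model):
-
-```python
-for chunk in model.stream("Why do parrots talk?"):
-    # Each chunk is an AIMessageChunk; print its text as soon as it arrives
-    print(chunk.content, end="|", flush=True)
-```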
-
-#### Usage with LangGraph
-
-[LangGraph](/docs/concepts/architecture#langgraph) compiled graphs are [Runnables](/docs/concepts/runnables) and support the standard streaming APIs.
-
-When using the *stream* and *astream* methods with LangGraph, you can choose **one or more** [streaming modes](https://langchain-ai.github.io/langgraph/reference/types/#langgraph.types.StreamMode), which allow you to control the type of output that is streamed. The available streaming modes are:
-
-- **"values"**: Emit all values of the [state](https://langchain-ai.github.io/langgraph/concepts/low_level/) for each step.
-- **"updates"**: Emit only the node name(s) and updates that were returned by the node(s) after each step.
-- **"debug"**: Emit debug events for each step.
-- **"messages"**: Emit LLM [messages](/docs/concepts/messages) [token-by-token](/docs/concepts/tokens).
-- **"custom"**: Emit custom output written using [LangGraph's StreamWriter](https://langchain-ai.github.io/langgraph/reference/types/#langgraph.types.StreamWriter).
-
-For more information, please see:
-* [LangGraph streaming conceptual guide](https://langchain-ai.github.io/langgraph/concepts/streaming/) for more information on how to stream when working with LangGraph.
-* [LangGraph streaming how-to guides](https://langchain-ai.github.io/langgraph/how-tos/#streaming) for specific examples of streaming in LangGraph.
-
-#### Usage with LCEL
-
-If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` and `astream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines so that the streaming results from the last step are available as soon as possible.
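-
-For example, a rough sketch of streaming an LCEL chain (assuming `model` is any chat model):
-
-```python
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-
-chain = ChatPromptTemplate.from_template("Tell me a joke about {topic}") | model | StrOutputParser()
-
-for chunk in chain.stream({"topic": "parrots"}):
-    # Only the output of the final step (the parsed string) is streamed
-    print(chunk, end="", flush=True)
-```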
-
-
-
-### `astream_events`
-<span data-heading-keywords="astream_events,stream_events,stream events"></span>
-
-:::tip
-Use the `astream_events` API to access custom data and intermediate outputs from LLM applications built entirely with [LCEL](/docs/concepts/lcel). 
-
-While this API is available for use with [LangGraph](/docs/concepts/architecture#langgraph) as well, it is usually not necessary when working with LangGraph, as the `stream` and `astream` methods provide comprehensive streaming capabilities for LangGraph graphs.
-:::
-
-For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from the chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app.
-
-There are ways to do this [using callbacks](/docs/concepts/callbacks), or by constructing your chain in such a way that it passes intermediate
-values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
-`.astream_events()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
-which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
-to the needs of your project.
-
-Here's one small example that prints just events containing streamed chat model output:
-
-```python
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_anthropic import ChatAnthropic
-
-model = ChatAnthropic(model="claude-3-sonnet-20240229")
-
-prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
-parser = StrOutputParser()
-chain = prompt | model | parser
-
-async for event in chain.astream_events({"topic": "parrot"}):
-    kind = event["event"]
-    if kind == "on_chat_model_stream":
-        print(event, end="|", flush=True)
-```
-
-You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!
-
-See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`, including a table listing available events.
-
-## Writing custom data to the stream
-
-To write custom data to the stream, you will need to choose one of the following methods based on the component you are working with:
-
-1. LangGraph's [StreamWriter](https://langchain-ai.github.io/langgraph/reference/types/#langgraph.types.StreamWriter) can be used to write custom data that will surface through **stream** and **astream** APIs when working with LangGraph. **Important**: this is a LangGraph feature, so it is not available when working with pure LCEL. See [how to stream custom data](https://langchain-ai.github.io/langgraph/how-tos/streaming-content/) for more information.
-2. [dispatch_custom_event](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.dispatch_custom_event.html#) / [adispatch_custom_event](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.adispatch_custom_event.html) can be used to write custom data that will be surfaced through the **astream_events** API. See [how to dispatch custom callback events](/docs/how_to/callbacks_custom_events/#astream-events-api) for more information.
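-
-For instance, here is a rough sketch of emitting a custom event from inside a step so that it surfaces via `astream_events` (this assumes a recent `langchain-core` that exposes `adispatch_custom_event`):
-
-```python
-from langchain_core.callbacks.manager import adispatch_custom_event
-from langchain_core.runnables import RunnableLambda
-
-async def slow_step(text: str) -> str:
-    # Surface progress to astream_events consumers as custom events.
-    # On Python 3.9/3.10 you must also pass the RunnableConfig explicitly.
-    await adispatch_custom_event("progress", {"status": "started"})
-    result = text.upper()  # stand-in for the real work
-    await adispatch_custom_event("progress", {"status": "done"})
-    return result
-
-runnable = RunnableLambda(slow_step)
-
-async for event in runnable.astream_events("hello", version="v2"):
-    if event["event"] == "on_custom_event":
-        print(event["name"], event["data"])
-```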
-
-## "Auto-Streaming" Chat Models
-
-LangChain simplifies streaming from [chat models](/docs/concepts/chat_models) by automatically enabling streaming mode in certain cases, even when you’re not explicitly calling the streaming methods. This is particularly useful when you use the non-streaming `invoke` method but still want to stream the entire application, including intermediate results from the chat model.
-
-### How It Works
-
-When you call the `invoke` (or `ainvoke`) method on a chat model, LangChain will automatically switch to streaming mode if it detects that you are trying to stream the overall application. 
-
-Under the hood, it'll have `invoke` (or `ainvoke`) use the `stream` (or `astream`) method to generate its output. The result of the invocation will be the same as far as the code that was using `invoke` is concerned; however, while the chat model is being streamed, LangChain will take care of invoking `on_llm_new_token` events in LangChain's [callback system](/docs/concepts/callbacks). These callback events
-allow LangGraph `stream`/`astream` and `astream_events` to surface the chat model's output in real-time.
-
-Example:
-
-```python
-def node(state):
-    ...
-    # The code below uses the invoke method, but LangChain will 
-    # automatically switch to streaming mode
-    # when it detects that the overall 
-    # application is being streamed.
-    ai_message = model.invoke(state["messages"])
-    ...
-
-for chunk in compiled_graph.stream(..., stream_mode="messages"):
-    ...
-```
-
-## Async Programming
-
-LangChain offers both synchronous (sync) and asynchronous (async) versions of many of its methods. The async methods are typically prefixed with an "a" (e.g., `ainvoke`, `astream`). When writing async code, it's crucial to consistently use these asynchronous methods to ensure non-blocking behavior and optimal performance.
-
-If streaming data fails to appear in real-time, please ensure that you are using the correct async methods for your workflow.
-
-Please review the [async programming in LangChain guide](/docs/concepts/async) for more information on writing async code with LangChain.
-
-## Related Resources
-
-Please see the following how-to guides for specific examples of streaming in LangChain:
-* [LangGraph conceptual guide on streaming](https://langchain-ai.github.io/langgraph/concepts/streaming/)
-* [LangGraph streaming how-to guides](https://langchain-ai.github.io/langgraph/how-tos/#streaming)
-* [How to stream runnables](/docs/how_to/streaming/): This how-to guide goes over common streaming patterns with LangChain components (e.g., chat models) and with [LCEL](/docs/concepts/lcel).
-* [How to stream chat models](/docs/how_to/chat_streaming/)
-* [How to stream tool calls](/docs/how_to/tool_streaming/)
-
-For writing custom data to the stream, please see the following resources:
-
-* If using LangGraph, see [how to stream custom data](https://langchain-ai.github.io/langgraph/how-tos/streaming-content/).
-* If using LCEL, see [how to dispatch custom callback events](/docs/how_to/callbacks_custom_events/#astream-events-api).
diff --git a/langchain_md_files/concepts/structured_outputs.mdx b/langchain_md_files/concepts/structured_outputs.mdx
deleted file mode 100644
index dad1c1a49cd8901cd96addb3b2d8ec40a5691c61..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/structured_outputs.mdx
+++ /dev/null
@@ -1,148 +0,0 @@
-# Structured outputs
-
-## Overview 
-
-For many applications, such as chatbots, models need to respond to users directly in natural language. 
-However, there are scenarios where we need models to output in a *structured format*. 
-For example, we might want to store the model output in a database and ensure that the output conforms to the database schema.
-This need motivates the concept of structured output, where models can be instructed to respond with a particular output structure.
-
-![Structured output](/img/structured_output.png)
-
-## Key concepts 
-
-**(1) Schema definition:** The output structure is represented as a schema, which can be defined in several ways. 
-**(2) Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it.
-
-## Recommended usage
-
-This pseudo-code illustrates the recommended workflow when using structured output.
-LangChain provides a method, [`with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method), that automates the process of binding the schema to the [model](/docs/concepts/chat_models/) and parsing the output.
-This helper function is available for all model providers that support structured output. 
-
-```python
-# Define schema
-schema = {"foo": "bar"}
-# Bind schema to model
-model_with_structure = model.with_structured_output(schema)
-# Invoke the model to produce structured output that matches the schema
-structured_output = model_with_structure.invoke(user_input)
-```
-
-## Schema definition
-
-The central concept is that the output structure of model responses needs to be represented in some way. 
-While the types of objects you can use depend on the model you're working with, there are common types of objects that are typically allowed or recommended for structured output in Python.
-
-The simplest and most common format for structured output is a JSON-like structure, which in Python can be represented as a dictionary (dict) or list (list).
-JSON objects (or dicts in Python) are often used directly when the tool requires raw, flexible, and minimal-overhead structured data.
-
-```json
-{
-  "answer": "The answer to the user's question",
-  "followup_question": "A followup question the user could ask"
-}
-```
-
-As a second example, [Pydantic](https://docs.pydantic.dev/latest/) is particularly useful for defining structured output schemas because it offers type hints and validation.
-Here's an example of a Pydantic schema: 
-
-```python
-from pydantic import BaseModel, Field
-class ResponseFormatter(BaseModel):
-    """Always use this tool to structure your response to the user."""
-    answer: str = Field(description="The answer to the user's question")
-    followup_question: str = Field(description="A followup question the user could ask")
-
-```
-
-## Returning structured output
-
-With a schema defined, we need a way to instruct the model to use it.
-While one approach is to include this schema in the prompt and *ask nicely* for the model to use it, this is not recommended. 
-Several more powerful methods that utilize native features in the model provider's API are available.
-
-### Using tool calling
-
-Many [model providers support](/docs/integrations/chat/) tool calling, a concept discussed in more detail in our [tool calling guide](/docs/concepts/tool_calling/).
-In short, tool calling involves binding a tool to a model and, when appropriate, the model can *decide* to call this tool and ensure its response conforms to the tool's schema.
-With this in mind, the central concept is straightforward: *simply bind our schema to a model as a tool!*
-Here is an example using the `ResponseFormatter` schema defined above:
-
-```python
-from langchain_openai import ChatOpenAI
-model = ChatOpenAI(model="gpt-4o", temperature=0)
-# Bind the ResponseFormatter schema as a tool to the model
-model_with_tools = model.bind_tools([ResponseFormatter])
-# Invoke the model
-ai_msg = model_with_tools.invoke("What is the powerhouse of the cell?")
-```
-
-The arguments of the tool call are already extracted as a dictionary. 
-This dictionary can be optionally parsed into a Pydantic object, matching our original `ResponseFormatter` schema.
-
-```python
-# Get the tool call arguments
-ai_msg.tool_calls[0]["args"]
-{'answer': "The powerhouse of the cell is the mitochondrion. Mitochondria are organelles that generate most of the cell's supply of adenosine triphosphate (ATP), which is used as a source of chemical energy.",
- 'followup_question': 'What is the function of ATP in the cell?'}
-# Parse the dictionary into a pydantic object
-pydantic_object = ResponseFormatter.model_validate(ai_msg.tool_calls[0]["args"])
-```
-
-### JSON mode
-
-In addition to tool calling, some model providers support a feature called `JSON mode`. 
-This supports JSON schema definition as input and constrains the model to produce a conforming JSON output.
-You can find a table of model providers that support JSON mode [here](/docs/integrations/chat/).
-Here is an example of how to use JSON mode with OpenAI:
-
-```python
-from langchain_openai import ChatOpenAI
-model = ChatOpenAI(model="gpt-4o", model_kwargs={ "response_format": { "type": "json_object" } })
-ai_msg = model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
-ai_msg.content
-'\n{\n  "random_ints": [23, 47, 89, 15, 34, 76, 58, 3, 62, 91]\n}'
-```
-
-One important point to flag: the model *still* returns a string, which needs to be parsed into a JSON object.
-This can be done with the `json` library, or with a JSON output parser if you need more advanced functionality.
-See this [how-to guide on the JSON output parser](/docs/how_to/output_parser_json) for more details.
-
-```python
-import json
-json_object = json.loads(ai_msg.content)
-{'random_ints': [23, 47, 89, 15, 34, 76, 58, 3, 62, 91]}
-```
-
-## Structured output method 
-
-There are a few challenges when producing structured output with the above methods: 
-
-(1) When tool calling is used, tool call arguments need to be parsed from a dictionary back to the original schema.
-
-(2) In addition, the model needs to be instructed to *always* use the tool when we want to enforce structured output, which is a provider-specific setting.
-
-(3) When JSON mode is used, the output needs to be parsed into a JSON object. 
-
-With these challenges in mind, LangChain provides a helper function (`with_structured_output()`) to streamline the process.
-
-![Diagram of with structured output](/img/with_structured_output.png)
-
-This both binds the schema to the model as a tool and parses the output to the specified output schema. 
-
-```python
-# Bind the schema to the model
-model_with_structure = model.with_structured_output(ResponseFormatter)
-# Invoke the model
-structured_output = model_with_structure.invoke("What is the powerhouse of the cell?")
-# Get back the pydantic object
-structured_output
-ResponseFormatter(answer="The powerhouse of the cell is the mitochondrion. Mitochondria are organelles that generate most of the cell's supply of adenosine triphosphate (ATP), which is used as a source of chemical energy.", followup_question='What is the function of ATP in the cell?')
-```
-
-:::info[Further reading]
-
-For more details on usage, see our [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).
-
-:::
diff --git a/langchain_md_files/concepts/testing.mdx b/langchain_md_files/concepts/testing.mdx
deleted file mode 100644
index cd0114f31e1b3e5e0a5e3c827cb006e6b8eb6a31..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/testing.mdx
+++ /dev/null
@@ -1,81 +0,0 @@
-# Testing
-<span data-heading-keywords="tests,testing,unit,integration"></span>
-
-Testing is a critical part of the development process that ensures your code works as expected and meets the desired quality standards.
-
-In the LangChain ecosystem, we have two main types of tests: **unit tests** and **integration tests**.
-
-For integrations that implement standard LangChain abstractions, we have a set of **standard tests** (both unit and integration) that help maintain compatibility between different components and ensure reliability of high-usage ones.
-
-## Unit Tests
-
-**Definition**: Unit tests are designed to validate the smallest parts of your code—individual functions or methods—ensuring they work as expected in isolation. They do not rely on external systems or integrations.
-
-**Example**: Testing the `convert_to_openai_messages` function to confirm it correctly converts an `AIMessage` to the OpenAI message dictionary format:
-
-```python
-from langchain_core.messages import AIMessage, ToolCall, convert_to_openai_messages
-
-def test_convert_to_openai_messages():
-    ai_message = AIMessage(
-        content="Let me call that tool for you!",
-        tool_calls=[
-            ToolCall(name='parrot_multiply_tool', id='1', args={'a': 2, 'b': 3}),
-        ]
-    )
-    
-    result = convert_to_openai_messages(ai_message)
-    
-    expected = {
-        "role": "assistant",
-        "tool_calls": [
-            {
-                "type": "function",
-                "id": "1",
-                "function": {
-                    "name": "parrot_multiply_tool",
-                    "arguments": '{"a": 2, "b": 3}',
-                },
-            }
-        ],
-        "content": "Let me call that tool for you!",
-    }
-    assert result == expected  # Ensure conversion matches expected output
-```
-
----
-
-## Integration Tests
-
-**Definition**: Integration tests validate that multiple components or systems work together as expected. For tools or integrations relying on external services, these tests often ensure end-to-end functionality.
-
-**Example**: Testing `ParrotMultiplyTool` with access to an API service that multiplies two numbers and adds 80:
-
-```python
-def test_integration_with_service():
-    tool = ParrotMultiplyTool()
-    result = tool.invoke({"a": 2, "b": 3})
-    assert result == 86
-```
-
----
-
-## Standard Tests
-
-**Definition**: Standard tests are pre-defined tests provided by LangChain to ensure consistency and reliability across all tools and integrations. They include both unit and integration test templates tailored for LangChain components.
-
-**Example**: Subclassing LangChain's `ToolsUnitTests` or `ToolsIntegrationTests` to automatically run standard tests:
-
-```python
-from langchain_tests.unit_tests import ToolsUnitTests
-
-class TestParrotMultiplyToolUnit(ToolsUnitTests):
-    @property
-    def tool_constructor(self):
-        return ParrotMultiplyTool
-
-    def tool_invoke_params_example(self):
-        return {"a": 2, "b": 3}
-```
-
-To learn more, check out our guide on [how to add standard tests to an integration](../../contributing/how_to/integrations/standard_tests).
diff --git a/langchain_md_files/concepts/text_llms.mdx b/langchain_md_files/concepts/text_llms.mdx
deleted file mode 100644
index d35a72476af3e5d8da3e713449a688a8b54d2a5f..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/text_llms.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-# String-in, string-out LLMs
-
-:::tip
-You are probably looking for the [Chat Model Concept Guide](/docs/concepts/chat_models) page for more information.
-:::
-
-LangChain has implementations for older language models that take a string as input and return a string as output. These models are typically named without the "Chat" prefix (e.g., `Ollama`, `Anthropic`, `OpenAI`, etc.), and may include the "LLM" suffix (e.g., `OllamaLLM`, `AnthropicLLM`, `OpenAILLM`, etc.). These models implement the [BaseLLM](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.llms.BaseLLM.html#langchain_core.language_models.llms.BaseLLM) interface.
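-
-For illustration only, a minimal sketch of calling one of these string-in, string-out models (assuming `langchain-openai` is installed and an API key is configured):
-
-```python
-from langchain_openai import OpenAI
-
-llm = OpenAI(model="gpt-3.5-turbo-instruct")
-completion = llm.invoke("Write a haiku about parrots.")  # plain string in, plain string out
-```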
-
-Users should almost exclusively use the newer [Chat Models](/docs/concepts/chat_models), as most
-model providers have adopted a chat-like interface for interacting with language models.
\ No newline at end of file
diff --git a/langchain_md_files/concepts/text_splitters.mdx b/langchain_md_files/concepts/text_splitters.mdx
deleted file mode 100644
index 335b854ec500afab87c16d6d9dedad4309ac645d..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/text_splitters.mdx
+++ /dev/null
@@ -1,135 +0,0 @@
-# Text splitters
-<span data-heading-keywords="text splitter,text splitting"></span>
-
-:::info[Prerequisites]
-
-* [Documents](/docs/concepts/retrievers/#interface)
-* [Tokenization](/docs/concepts/tokens)
-:::
-
-## Overview
-
-Document splitting is often a crucial preprocessing step for many applications.
-It involves breaking down large texts into smaller, manageable chunks.
-This process offers several benefits, such as ensuring consistent processing of varying document lengths, overcoming input size limitations of models, and improving the quality of text representations used in retrieval systems.
-There are several strategies for splitting documents, each with its own advantages.
-
-## Key concepts
-
-![Conceptual Overview](/img/text_splitters.png)
-
-Text splitters split documents into smaller chunks for use in downstream applications.
-
-## Why split documents?
-
-There are several reasons to split documents:
-
-- **Handling non-uniform document lengths**: Real-world document collections often contain texts of varying sizes. Splitting ensures consistent processing across all documents.
-- **Overcoming model limitations**: Many embedding models and language models have maximum input size constraints. Splitting allows us to process documents that would otherwise exceed these limits.
-- **Improving representation quality**: For longer documents, the quality of embeddings or other representations may degrade as they try to capture too much information. Splitting can lead to more focused and accurate representations of each section.
-- **Enhancing retrieval precision**: In information retrieval systems, splitting can improve the granularity of search results, allowing for more precise matching of queries to relevant document sections.
-- **Optimizing computational resources**: Working with smaller chunks of text can be more memory-efficient and allow for better parallelization of processing tasks.
-
-Now, the next question is *how* to split the documents into chunks! There are several strategies, each with its own advantages.
-
-:::info[Further reading]
-* See Greg Kamradt's [chunkviz](https://chunkviz.up.railway.app/) to visualize different splitting strategies discussed below.
-:::
-
-## Approaches
-
-### Length-based
-
-The most intuitive strategy is to split documents based on their length. This simple yet effective approach ensures that each chunk doesn't exceed a specified size limit.
-Key benefits of length-based splitting:
-- Straightforward implementation
-- Consistent chunk sizes
-- Easily adaptable to different model requirements
-
-Types of length-based splitting:
-- **Token-based**: Splits text based on the number of tokens, which is useful when working with language models.
-- **Character-based**: Splits text based on the number of characters, which can be more consistent across different types of text.
-
-Example implementation using LangChain's `CharacterTextSplitter` with token-based splitting:
-
-```python
-from langchain_text_splitters import CharacterTextSplitter
-text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
-    encoding_name="cl100k_base", chunk_size=100, chunk_overlap=0
-)
-texts = text_splitter.split_text(document)
-```
-
-:::info[Further reading]
-
-* See the how-to guide for [token-based](/docs/how_to/split_by_token/) splitting.
-* See the how-to guide for [character-based](/docs/how_to/character_text_splitter/) splitting.
-
-:::
-
-### Text-structured based
-
-Text is naturally organized into hierarchical units such as paragraphs, sentences, and words. 
-We can leverage this inherent structure to inform our splitting strategy, creating splits that maintain natural language flow, preserve semantic coherence within each split, and adapt to varying levels of text granularity.
-LangChain's [`RecursiveCharacterTextSplitter`](/docs/how_to/recursive_text_splitter/) implements this concept:
-- The `RecursiveCharacterTextSplitter` attempts to keep larger units (e.g., paragraphs) intact.
-- If a unit exceeds the chunk size, it moves to the next level (e.g., sentences).
-- This process continues down to the word level if necessary.
-
-Here is example usage:
-
-```python
-from langchain_text_splitters import RecursiveCharacterTextSplitter
-text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=0)
-texts = text_splitter.split_text(document)
-```
-
-:::info[Further reading]
-
-* See the how-to guide for [recursive text splitting](/docs/how_to/recursive_text_splitter/).
-
-:::
-
-### Document-structured based
-
-Some documents have an inherent structure, such as HTML, Markdown, or JSON files. 
-In these cases, it's beneficial to split the document based on its structure, as it often naturally groups semantically related text.
-Key benefits of structure-based splitting:
-- Preserves the logical organization of the document
-- Maintains context within each chunk
-- Can be more effective for downstream tasks like retrieval or summarization
-
-Examples of structure-based splitting:
-- **Markdown**: Split based on headers (e.g., #, ##, ###)
-- **HTML**: Split using tags
-- **JSON**: Split by object or array elements
-- **Code**: Split by functions, classes, or logical blocks
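-
-For example, here is a rough sketch of Markdown header-based splitting using LangChain's `MarkdownHeaderTextSplitter` (the sample text is illustrative):
-
-```python
-from langchain_text_splitters import MarkdownHeaderTextSplitter
-
-markdown_text = "# Title\n\nIntro paragraph.\n\n## Section 1\n\nDetails about section 1."
-
-splitter = MarkdownHeaderTextSplitter(
-    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
-)
-docs = splitter.split_text(markdown_text)  # one Document per header-delimited section
-```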
-
-:::info[Further reading]
-
-* See the how-to guide for [Markdown splitting](/docs/how_to/markdown_header_metadata_splitter/).
-* See the how-to guide for [Recursive JSON splitting](/docs/how_to/recursive_json_splitter/).
-* See the how-to guide for [Code splitting](/docs/how_to/code_splitter/).
-* See the how-to guide for [HTML splitting](/docs/how_to/split_html/).
-
-:::
-
-### Semantic meaning based
-
-Unlike the previous methods, semantic-based splitting actually considers the *content* of the text. 
-While other approaches use document or text structure as proxies for semantic meaning, this method directly analyzes the text's semantics.
-There are several ways to implement this, but conceptually the approach is to split text when there are significant changes in text *meaning*.
-As an example, we can use a sliding window approach to generate embeddings, and compare the embeddings to find significant differences:
-
-- Start with the first few sentences and generate an embedding.
-- Move to the next group of sentences and generate another embedding (e.g., using a sliding window approach).
-- Compare the embeddings to find significant differences, which indicate potential "break points" between semantic sections.
-
-This technique helps create chunks that are more semantically coherent, potentially improving the quality of downstream tasks like retrieval or summarization.
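-
-As a rough sketch (this assumes the experimental `SemanticChunker` from `langchain_experimental` together with an embeddings model):
-
-```python
-from langchain_experimental.text_splitter import SemanticChunker
-from langchain_openai import OpenAIEmbeddings
-
-text_splitter = SemanticChunker(OpenAIEmbeddings())
-docs = text_splitter.create_documents([document])  # splits where embedding similarity drops sharply
-```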
-
-:::info[Further reading]
-
-* See the how-to guide for [splitting text based on semantic meaning](/docs/how_to/semantic-chunker/).
-* See Greg Kamradt's [notebook](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) showcasing semantic splitting.
-
-:::
diff --git a/langchain_md_files/concepts/tokens.mdx b/langchain_md_files/concepts/tokens.mdx
deleted file mode 100644
index d42755e8d561a6828ef4a3ae91f54a7107b686a9..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/tokens.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
-# Tokens
-
-Modern large language models (LLMs) are typically based on a transformer architecture that processes a sequence of units known as tokens. Tokens are the fundamental elements that models use to break down input and generate output. In this section, we'll discuss what tokens are and how they are used by language models.
-
-## What is a token?
-
-A **token** is the basic unit that a language model reads, processes, and generates. These units can vary based on how the model provider defines them, but in general, they could represent:
-
-* A whole word (e.g., "apple"),
-* A part of a word (e.g., "app"),
-* Or other linguistic components such as punctuation or spaces.
-
-The way the model tokenizes the input depends on its **tokenizer algorithm**, which converts the input into tokens. Similarly, the model’s output comes as a stream of tokens, which is then decoded back into human-readable text.
-
-## How tokens work in language models
-
-The reason language models use tokens is tied to how they understand and predict language. Rather than processing characters or entire sentences directly, language models focus on **tokens**, which represent meaningful linguistic units. Here's how the process works:
-
-1. **Input Tokenization**: When you provide a model with a prompt (e.g., "LangChain is cool!"), the tokenizer algorithm splits the text into tokens. For example, the sentence could be tokenized into parts like `["Lang", "Chain", " is", " cool", "!"]`. Note that token boundaries don’t always align with word boundaries.
-    ![](/img/tokenization.png)
-
-2. **Processing**: The transformer architecture behind these models processes tokens sequentially to predict the next token in a sentence. It does this by analyzing the relationships between tokens, capturing context and meaning from the input.
-3. **Output Generation**: The model generates new tokens one by one. These output tokens are then decoded back into human-readable text.
-
-Using tokens instead of raw characters allows the model to focus on linguistically meaningful units, which helps it capture grammar, structure, and context more effectively.
-
-## Tokens don’t have to be text
-
-Although tokens are most commonly used to represent text, they don’t have to be limited to textual data. Tokens can also serve as abstract representations of **multi-modal data**, such as:
-
-- **Images**,
-- **Audio**,
-- **Video**,
-- And other types of data.
-
-At the time of writing, virtually no models support **multi-modal output**, and only a few models can handle **multi-modal inputs** (e.g., text combined with images or audio). However, as advancements in AI continue, we expect **multi-modality** to become much more common. This would allow models to process and generate a broader range of media, significantly expanding the scope of what tokens can represent and how models can interact with diverse types of data.
-
-:::note
-In principle, **anything that can be represented as a sequence of tokens** could be modeled in a similar way. For example, **DNA sequences**—which are composed of a series of nucleotides (A, T, C, G)—can be tokenized and modeled to capture patterns, make predictions, or generate sequences. This flexibility allows transformer-based models to handle diverse types of sequential data, further broadening their potential applications across various domains, including bioinformatics, signal processing, and other fields that involve structured or unstructured sequences.
-:::
-
-Please see the [multimodality](/docs/concepts/multimodality) section for more information on multi-modal inputs and outputs.
-
-## Why not use characters?
-
-Using tokens instead of individual characters makes models both more efficient and better at understanding context and grammar. Tokens represent meaningful units, like whole words or parts of words, allowing models to capture language structure more effectively than by processing raw characters. Token-level processing also reduces the number of units the model has to handle, leading to faster computation.
-
-In contrast, character-level processing would require handling a much larger sequence of input, making it harder for the model to learn relationships and context. Tokens enable models to focus on linguistic meaning, making them more accurate and efficient in generating responses.
-
-## How tokens correspond to text
-
-Please see this post from [OpenAI](https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them) for more details on how tokens are counted and how they correspond to text.
-
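-If you want a rough count yourself, here is a minimal sketch using the `tiktoken` library (an assumption; other providers ship their own tokenizers):
-
-```python
-import tiktoken
-
-encoding = tiktoken.get_encoding("cl100k_base")
-tokens = encoding.encode("LangChain is cool!")
-print(len(tokens))              # number of tokens
-print(encoding.decode(tokens))  # round-trips back to the original text
-```
-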
-According to the OpenAI post, the approximate token counts for English text are as follows:
-
-* 1 token ~= 4 chars in English
-* 1 token ~= ¾ words
-* 100 tokens ~= 75 words
\ No newline at end of file
diff --git a/langchain_md_files/concepts/tool_calling.mdx b/langchain_md_files/concepts/tool_calling.mdx
deleted file mode 100644
index 353eb69170ae0653d86990b17cf0a6bd7134278e..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/tool_calling.mdx
+++ /dev/null
@@ -1,149 +0,0 @@
-# Tool calling
-
-:::info[Prerequisites]
-* [Tools](/docs/concepts/tools)
-* [Chat Models](/docs/concepts/chat_models)
-:::
-
-
-## Overview 
-
-Many AI applications interact directly with humans. In these cases, it is appropriate for models to respond in natural language.
-But what about cases where we want a model to also interact *directly* with systems, such as databases or an API?
-These systems often have a particular input schema; for example, APIs frequently have a required payload structure.
-This need motivates the concept of *tool calling*. You can use [tool calling](https://platform.openai.com/docs/guides/function-calling/example-use-cases) to request model responses that match a particular schema.
-
-:::info
-You will sometimes hear the term `function calling`. We use this term interchangeably with `tool calling`. 
-:::
-
-![Conceptual overview of tool calling](/img/tool_calling_concept.png)
-
-## Key concepts 
-
-**(1) Tool Creation:** Use the [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) decorator to create a [tool](/docs/concepts/tools). A tool is an association between a function and its schema.
-**(2) Tool Binding:** The tool needs to be connected to a model that supports tool calling. This gives the model awareness of the tool and the associated input schema required by the tool.
-**(3) Tool Calling:** When appropriate, the model can decide to call a tool and ensure its response conforms to the tool's input schema.
-**(4) Tool Execution:** The tool can be executed using the arguments provided by the model.
-
-![Conceptual parts of tool calling](/img/tool_calling_components.png)
-
-## Recommended usage
-
-This pseudo-code illustrates the recommended workflow for using tool calling. 
-Created tools are passed to the `.bind_tools()` method as a list.
-The model can be called as usual. If a tool call is made, the model's response will contain the tool call arguments.
-The tool call arguments can be passed directly to the tool.
-
-```python
-# Tool creation
-tools = [my_tool]
-# Tool binding
-model_with_tools = model.bind_tools(tools)
-# Tool calling 
-response = model_with_tools.invoke(user_input)
-```
-
-## Tool creation
-
-The recommended way to create a tool is using the `@tool` decorator.
-
-```python
-from langchain_core.tools import tool
-
-@tool
-def multiply(a: int, b: int) -> int:
-    """Multiply a and b."""
-    return a * b
-```
-
-:::info[Further reading]
-
-* See our conceptual guide on [tools](/docs/concepts/tools/) for more details.
-* See our [model integrations](/docs/integrations/chat/) that support tool calling.
-* See our [how-to guide](/docs/how_to/tool_calling/) on tool calling.
-
-:::
-
-## Tool binding 
-
-[Many model providers](https://platform.openai.com/docs/guides/function-calling) support tool calling.
-
-:::tip
-See our [model integration page](/docs/integrations/chat/) for a list of providers that support tool calling.
-:::
-
-The central concept to understand is that LangChain provides a standardized interface for connecting tools to models. 
-The `.bind_tools()` method can be used to specify which tools are available for a model to call. 
-
-```python
-model_with_tools = model.bind_tools(tools_list)
-```
-
-As a specific example, let's take a function `multiply` and bind it as a tool to a model that supports tool calling.
-
-```python
-def multiply(a: int, b: int) -> int:
-    """Multiply a and b.
-
-    Args:
-        a: first int
-        b: second int
-    """
-    return a * b
-
-llm_with_tools = tool_calling_model.bind_tools([multiply])
-```
-
-## Tool calling
-
-![Diagram of a tool call by a model](/img/tool_call_example.png)
-
-A key principle of tool calling is that the model decides when to use a tool based on the input's relevance. The model doesn't always need to call a tool.
-For example, given an unrelated input, the model would not call the tool:
-
-```python
-result = llm_with_tools.invoke("Hello world!")
-```
-
-The result would be an `AIMessage` containing the model's response in natural language (e.g., "Hello!").
-However, if we pass an input *relevant to the tool*, the model should choose to call it:
-
-```python
-result = llm_with_tools.invoke("What is 2 multiplied by 3?")
-```
-
-As before, the output `result` will be an `AIMessage`. 
-But, if the tool was called, `result` will have a `tool_calls` attribute.
-This attribute includes everything needed to execute the tool, including the tool name and input arguments:
-
-```
-result.tool_calls
-[{'name': 'multiply', 'args': {'a': 2, 'b': 3}, 'id': 'xxx', 'type': 'tool_call'}]
-```
-
-For more details on usage, see our [how-to guides](/docs/how_to/#tools)!
-
-## Tool execution
-
-[Tools](/docs/concepts/tools/) implement the [Runnable](/docs/concepts/runnables/) interface, which means that they can be invoked (e.g., `tool.invoke(args)`) directly.
-
-[LangGraph](https://langchain-ai.github.io/langgraph/) offers pre-built components (e.g., [`ToolNode`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.tool_node.ToolNode)) that will often invoke the tool on behalf of the user.
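-
-For example, a minimal sketch of executing the tool call from the example above (using the `@tool`-decorated `multiply` from the Tool creation section):
-
-```python
-for tool_call in result.tool_calls:
-    # Execute the tool with the arguments the model provided
-    tool_output = multiply.invoke(tool_call["args"])
-    print(tool_output)  # 6
-```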
-
-:::info[Further reading]
-
-* See our [how-to guide](/docs/how_to/tool_calling/) on tool calling.
-* See the [LangGraph documentation on using ToolNode](https://langchain-ai.github.io/langgraph/how-tos/tool-calling/).
-
-:::
-
-## Best practices
-
-When designing [tools](/docs/concepts/tools/) to be used by a model, it is important to keep in mind that:
-
-* Models that have explicit [tool-calling APIs](/docs/concepts/tool_calling) will be better at tool calling than non-fine-tuned models.
-* Models will perform better if the tools have well-chosen names and descriptions.
-* Simple, narrowly scoped tools are easier for models to use than complex tools.
-* Asking the model to select from a large list of tools poses challenges for the model.
-
-
diff --git a/langchain_md_files/concepts/tools.mdx b/langchain_md_files/concepts/tools.mdx
deleted file mode 100644
index c459a5973b9b3efd61cefb017df66b04509c814c..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/tools.mdx
+++ /dev/null
@@ -1,211 +0,0 @@
-# Tools
-
-:::info Prerequisites
-- [Chat models](/docs/concepts/chat_models/)
-:::
-
-## Overview
-
-The **tool** abstraction in LangChain associates a Python **function** with a **schema** that defines the function's **name**, **description** and **expected arguments**. 
-
-**Tools** can be passed to [chat models](/docs/concepts/chat_models) that support [tool calling](/docs/concepts/tool_calling), allowing the model to request the execution of a specific function with specific inputs.
-
-## Key concepts
-
-- Tools are a way to encapsulate a function and its schema in a way that can be passed to a chat model.
-- Create tools using the [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) decorator, which simplifies the process of tool creation, supporting the following:
-   - Automatically inferring the tool's **name**, **description** and **expected arguments**, while also supporting customization.
-   - Defining tools that return **artifacts** (e.g. images, dataframes, etc.)
-   - Hiding input arguments from the schema (and hence from the model) using **injected tool arguments**.
-
-## Tool interface
-
-The tool interface is defined in the [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html#langchain_core.tools.base.BaseTool) class which is a subclass of the [Runnable Interface](/docs/concepts/runnables).
-
-The key attributes that correspond to the tool's **schema**:
-
-- **name**: The name of the tool.
-- **description**: A description of what the tool does.
-- **args**: Property that returns the JSON schema for the tool's arguments.
-
-The key methods to execute the function associated with the **tool**:
-
-- **invoke**: Invokes the tool with the given arguments.
-- **ainvoke**: Invokes the tool with the given arguments, asynchronously. Used for [async programming with LangChain](/docs/concepts/async).
-
-## Create tools using the `@tool` decorator
-
-The recommended way to create tools is using the [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) decorator. This decorator is designed to simplify the process of tool creation and should be used in most cases. After defining a function, you can decorate it with [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) to create a tool that implements the [Tool Interface](#tool-interface).
-
-```python
-from langchain_core.tools import tool
-
-@tool
-def multiply(a: int, b: int) -> int:
-   """Multiply two numbers."""
-   return a * b
-```
-
-For more details on how to create tools, see the [how to create custom tools](/docs/how_to/custom_tools/) guide.
-
-:::note
-LangChain has a few other ways to create tools; e.g., by sub-classing the [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html#langchain_core.tools.base.BaseTool) class or by using `StructuredTool`. These methods are shown in the [how to create custom tools guide](/docs/how_to/custom_tools/), but
-we generally recommend using the `@tool` decorator for most cases.
-:::
-
-## Use the tool directly
-
-Once you have defined a tool, you can use it directly by calling the function. For example, to use the `multiply` tool defined above:
-
-```python
-multiply.invoke({"a": 2, "b": 3})
-```
-
-### Inspect
-
-You can also inspect the tool's schema and other properties:
-
-```python
-print(multiply.name) # multiply
-print(multiply.description) # Multiply two numbers.
-print(multiply.args) 
-# {
-# 'type': 'object', 
-# 'properties': {'a': {'type': 'integer'}, 'b': {'type': 'integer'}}, 
-# 'required': ['a', 'b']
-# }
-```
-
-:::note
-If you're using pre-built LangChain or LangGraph components like [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.chat_agent_executor.create_react_agent), you might not need to interact with tools directly. However, understanding how to use them can be valuable for debugging and testing. Additionally, when building custom LangGraph workflows, you may find it necessary to work with tools directly.
-:::
-
-## Configuring the schema
-
-The `@tool` decorator offers additional options to configure the schema of the tool (e.g., modify name, description
-or parse the function's doc-string to infer the schema).
-
-Please see the [API reference for @tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) for more details and review the [how to create custom tools](/docs/how_to/custom_tools/) guide for examples.
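-
-For example, here is a minimal sketch of customizing the tool's name and parsing a Google-style docstring to populate per-argument descriptions (both the name argument and `parse_docstring` are options on the `@tool` decorator):
-
-```python
-from langchain_core.tools import tool
-
-@tool("multiplier", parse_docstring=True)
-def multiply_numbers(a: int, b: int) -> int:
-    """Multiply two numbers.
-
-    Args:
-        a: The first number.
-        b: The second number.
-    """
-    return a * b
-
-print(multiply_numbers.name)  # multiplier
-print(multiply_numbers.args)  # includes argument descriptions parsed from the docstring
-```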
-
-## Tool artifacts
-
-**Tools** are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.
-
-```python
-@tool(response_format="content_and_artifact")
-def some_tool(...) -> Tuple[str, Any]:
-    """Tool that does something."""
-    ...
-    return 'Message for chat model', some_artifact 
-```
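-
-For instance, here is a more concrete sketch of a hypothetical tool that returns a short message for the model along with the full result as an artifact for downstream components:
-
-```python
-import random
-from typing import List, Tuple
-
-from langchain_core.tools import tool
-
-@tool(response_format="content_and_artifact")
-def generate_random_ints(low: int, high: int, size: int) -> Tuple[str, List[int]]:
-    """Generate `size` random integers in the range [low, high]."""
-    array = [random.randint(low, high) for _ in range(size)]
-    # First element goes to the model; the second is the artifact kept for downstream use.
-    return f"Generated {size} random ints in [{low}, {high}].", array
-```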
-
-See [how to return artifacts from tools](/docs/how_to/tool_artifacts/) for more details.
-
-## Special type annotations
-
-There are a number of special type annotations that can be used in the tool's function signature to configure the run time behavior of the tool.
-
-The following type annotations will end up **removing** the argument from the tool's schema. This can be useful for arguments that should not be exposed to the model and that the model should not be able to control.
-
-- **InjectedToolArg**: Value should be injected manually at runtime using `.invoke` or `.ainvoke`.
-- **RunnableConfig**: Pass in the RunnableConfig object to the tool.
-- **InjectedState**: Pass in the overall state of the LangGraph graph to the tool.
-- **InjectedStore**: Pass in the LangGraph store object to the tool.
-
-You can also use the `Annotated` type with a string literal to provide a **description** for the corresponding argument that **WILL** be exposed in the tool's schema.
-
-- **Annotated[..., "string literal"]** -- Adds a description to the argument that will be exposed in the tool's schema.
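-
-For example, a minimal sketch using `Annotated` to attach argument descriptions:
-
-```python
-from typing import Annotated
-
-from langchain_core.tools import tool
-
-@tool
-def repeat(
-    text: Annotated[str, "The text to repeat"],
-    times: Annotated[int, "How many times to repeat the text"],
-) -> str:
-    """Repeat a piece of text."""
-    return text * times
-```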
-
-### InjectedToolArg
-
-There are cases where certain arguments need to be passed to a tool at runtime but should not be generated by the model itself. For this, we use the `InjectedToolArg` annotation, which allows certain parameters to be hidden from the tool's schema.
-
-For example, if a tool requires a `user_id` to be injected dynamically at runtime, it can be structured in this way:
-
-```python
-from langchain_core.tools import tool, InjectedToolArg
-
-@tool
-def user_specific_tool(input_data: str, user_id: InjectedToolArg) -> str:
-    """Tool that processes input data."""
-    return f"User {user_id} processed {input_data}"
-```
-
-Annotating the `user_id` argument with `InjectedToolArg` tells LangChain that this argument should not be exposed as part of the
-tool's schema.
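-
-When invoking the tool directly, the injected value still has to be supplied by the caller. A minimal usage sketch with the tool defined above:
-
-```python
-user_specific_tool.invoke({"input_data": "some text", "user_id": "user-123"})
-# -> "User user-123 processed some text"
-```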
-
-See [how to pass run time values to tools](/docs/how_to/tool_runtime/) for more details on how to use `InjectedToolArg`.  
-
-
-### RunnableConfig
-
-You can use the `RunnableConfig` object to pass custom run time values to tools.
-
-If you need to access the [RunnableConfig](/docs/concepts/runnables/#runnableconfig) object from within a tool, you can do so by adding a `RunnableConfig` annotation to the tool's function signature.
-
-```python
-from langchain_core.runnables import RunnableConfig
-
-@tool
-async def some_func(..., config: RunnableConfig) -> ...:
-    """Tool that does something."""
-    # do something with config
-    ...
-
-await some_func.ainvoke(..., config={"configurable": {"value": "some_value"}})
-```
-
-The `config` will not be part of the tool's schema and will be injected at runtime with appropriate values.
-
-:::note
-You may need to access the `config` object and manually propagate it to sub-calls. This applies if you're working with Python 3.9 / 3.10 in an [async](/docs/concepts/async) environment, where the `config` object is not propagated to sub-calls automatically.
-
-Please read [Propagation of RunnableConfig](/docs/concepts/runnables/#propagation-of-runnableconfig) to learn how to propagate the `RunnableConfig` down the call chain manually (or upgrade to Python 3.11, where this is no longer an issue).
-:::
-
-### InjectedState
-
-Please see the [InjectedState](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.tool_node.InjectedState) documentation for more details.
-
-### InjectedStore
-
-Please see the [InjectedStore](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.tool_node.InjectedStore) documentation for more details.
-
-## Best practices
-
-When designing tools to be used by models, keep the following in mind:
-
-- Tools that are well-named, correctly documented, and properly type-hinted are easier for models to use.
-- Design simple and narrowly scoped tools, as they are easier for models to use correctly.
-- Use chat models that support [tool-calling](/docs/concepts/tool_calling) APIs to take advantage of tools.
-
-
-## Toolkits
-<span data-heading-keywords="toolkit,toolkits"></span>
-
-LangChain has a concept of **toolkits**. This is a very thin abstraction that groups together tools that
-are designed to be used together for specific tasks.
-
-### Interface
-
-All Toolkits expose a `get_tools` method which returns a list of tools. You can therefore do:
-
-```python
-# Initialize a toolkit
-toolkit = ExampleToolkit(...)
-
-# Get list of tools
-tools = toolkit.get_tools()
-```
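-
-The returned tools can then be used like any other tools, for example bound to a chat model that supports tool calling (a sketch that assumes `model` is such a chat model):
-
-```python
-model_with_tools = model.bind_tools(tools)
-```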
-
-## Related resources
-
-See the following resources for more information:
-
-- [API Reference for @tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html)
-- [How to create custom tools](/docs/how_to/custom_tools/)
-- [How to pass run time values to tools](/docs/how_to/tool_runtime/)
-- [All LangChain tool how-to guides](https://docs.langchain.com/docs/how_to/#tools)
-- [Additional how-to guides that show usage with LangGraph](https://langchain-ai.github.io/langgraph/how-tos/tool-calling/)
-- Tool integrations, see the [tool integration docs](https://docs.langchain.com/docs/integrations/tools/).
-
diff --git a/langchain_md_files/concepts/tracing.mdx b/langchain_md_files/concepts/tracing.mdx
deleted file mode 100644
index 659992eeb95737e0c11312bba1de7d08959ebd88..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/tracing.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-# Tracing
-
-<span data-heading-keywords="trace,tracing"></span>
-
-A trace is essentially a series of steps that your application takes to go from input to output.
-Traces contain individual steps called `runs`. These can be individual calls from a model, retriever,
-tool, or sub-chains.
-Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
-
-For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing).
diff --git a/langchain_md_files/concepts/vectorstores.mdx b/langchain_md_files/concepts/vectorstores.mdx
deleted file mode 100644
index 16cc23ba79716462bcc165bb155bdbe2bfc51750..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/vectorstores.mdx
+++ /dev/null
@@ -1,191 +0,0 @@
-# Vector stores
-<span data-heading-keywords="vector,vectorstore,vectorstores,vector store,vector stores"></span>
-
-:::info[Prerequisites]
-
-* [Embeddings](/docs/concepts/embedding_models/)
-* [Text splitters](/docs/concepts/text_splitters/)
-
-:::
-:::info[Note]
-
-This conceptual overview focuses on text-based indexing and retrieval for simplicity. 
-However, embedding models can be [multi-modal](https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-multimodal-embeddings)
-and vector stores can be used to store and retrieve a variety of data types beyond text.
-:::
-
-## Overview
-
-Vector stores are specialized data stores that enable indexing and retrieving information based on vector representations.
-
-These vectors, called [embeddings](/docs/concepts/embedding_models/), capture the semantic meaning of data that has been embedded.
-
-Vector stores are frequently used to search over unstructured data, such as text, images, and audio, to retrieve relevant information based on semantic similarity rather than exact keyword matches.
-
-![Vector stores](/img/vectorstores.png)
-
-## Integrations
-
-LangChain has a large number of vectorstore integrations, allowing users to easily switch between different vectorstore implementations.
-
-Please see the [full list of LangChain vectorstore integrations](/docs/integrations/vectorstores/).
-
-## Interface
-
-LangChain provides a standard interface for working with vector stores, allowing users to easily switch between different vectorstore implementations.
-
-The interface consists of basic methods for writing, deleting and searching for documents in the vector store.
-
-The key methods are:
-
-- `add_documents`: Add a list of documents to the vector store.
-- `delete`: Delete a list of documents from the vector store.
-- `similarity_search`: Search for similar documents to a given query.
-
-
-## Initialization
-
-Most vector stores in LangChain accept an embedding model as an argument when initializing the vector store.
-
-We will use LangChain's [InMemoryVectorStore](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.in_memory.InMemoryVectorStore.html) implementation to illustrate the API.
-
-```python
-from langchain_core.vectorstores import InMemoryVectorStore
-# Initialize with an embedding model
-vector_store = InMemoryVectorStore(embedding=SomeEmbeddingModel())
-```
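-
-For example, with OpenAI embeddings (a sketch that assumes the `langchain-openai` package is installed and `OPENAI_API_KEY` is set):
-
-```python
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import OpenAIEmbeddings
-
-vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings(model="text-embedding-3-small"))
-```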
-
-## Adding documents
-
-To add documents, use the `add_documents` method.
-
-This API works with a list of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects.
-`Document` objects all have `page_content` and `metadata` attributes, making them a universal way to store unstructured text and associated metadata.
-
-```python
-from langchain_core.documents import Document
-
-document_1 = Document(
-    page_content="I had chocalate chip pancakes and scrambled eggs for breakfast this morning.",
-    metadata={"source": "tweet"},
-)
-
-document_2 = Document(
-    page_content="The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees.",
-    metadata={"source": "news"},
-)
-
-documents = [document_1, document_2]
-
-vector_store.add_documents(documents=documents)
-```
-
-You should usually provide IDs for the documents you add to the vector store, so
-that instead of adding the same document multiple times, you can update the existing document.
-
-```python
-vector_store.add_documents(documents=documents, ids=["doc1", "doc2"])
-```
-
-## Delete
-
-To delete documents, use the `delete` method which takes a list of document IDs to delete.
-
-```python
-vector_store.delete(ids=["doc1"])
-```
-
-## Search
-
-Vector stores embed and store the documents that are added.
-If we pass in a query, the vectorstore will embed the query, perform a similarity search over the embedded documents, and return the most similar ones.
-This captures two important concepts: first, there needs to be a way to measure the similarity between the query and *any* [embedded](/docs/concepts/embedding_models/) document.
-Second, there needs to be an algorithm to efficiently perform this similarity search across *all* embedded documents.
-
-### Similarity metrics
-
-A critical advantage of embedding vectors is that they can be compared using simple mathematical operations:
-
-- **Cosine Similarity**: Measures the cosine of the angle between two vectors.
-- **Euclidean Distance**: Measures the straight-line distance between two points.
-- **Dot Product**: Measures the projection of one vector onto another.
-
-The similarity metric can sometimes be selected when initializing the vectorstore. Please refer
-to the documentation of the specific vectorstore you are using to see what similarity metrics are supported.
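-
-As an illustration, cosine similarity between two embedding vectors can be computed in a few lines of NumPy (a sketch; in practice the vector store performs this comparison for you):
-
-```python
-import numpy as np
-
-def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
-    """Cosine of the angle between two embedding vectors."""
-    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
-```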
-
-:::info[Further reading]
-
-* See [this documentation](https://developers.google.com/machine-learning/clustering/dnn-clustering/supervised-similarity) from Google on similarity metrics to consider with embeddings.
-* See Pinecone's [blog post](https://www.pinecone.io/learn/vector-similarity/) on similarity metrics.
-* See OpenAI's [FAQ](https://platform.openai.com/docs/guides/embeddings/faq) on what similarity metric to use with OpenAI embeddings.
-
-:::
-
-### Similarity search
-
-Given a similarity metric to measure the distance between the embedded query and any embedded document, we need an algorithm to efficiently search over *all* the embedded documents to find the most similar ones.
-There are various ways to do this. As an example, many vectorstores implement [HNSW (Hierarchical Navigable Small World)](https://www.pinecone.io/learn/series/faiss/hnsw/), a graph-based index structure that allows for efficient similarity search.
-Regardless of the search algorithm used under the hood, the LangChain vectorstore interface has a `similarity_search` method for all integrations. 
-This will take the search query, create an embedding, find similar documents, and return them as a list of [Documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html).
-
-```python
-query = "my query"
-docs = vectorstore.similarity_search(query)
-```
-
-Many vectorstores support search parameters to be passed with the `similarity_search` method. See the documentation for the specific vectorstore you are using to see what parameters are supported.
-As an example, [Pinecone](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html#langchain_pinecone.vectorstores.PineconeVectorStore.similarity_search) accepts several parameters that reflect important general concepts.
-Many vectorstores support [`k`](/docs/integrations/vectorstores/pinecone/#query-directly), which controls the number of Documents to return, and `filter`, which allows for filtering documents by metadata:
-
-- `query (str)` – Text to look up documents similar to.
-- `k (int)` – Number of Documents to return. Defaults to 4.
-- `filter (dict | None)` – Dictionary of argument(s) to filter on metadata.
-
-:::info[Further reading]
-
-* See the [how-to guide](/docs/how_to/vectorstores/) for more details on how to use the `similarity_search` method.
-* See the [integrations page](/docs/integrations/vectorstores/) for more details on arguments that can be passed in to the `similarity_search` method for specific vectorstores.
-
-:::
-
-### Metadata filtering
-
-While vectorstores implement a search algorithm to efficiently search over *all* the embedded documents to find the most similar ones, many also support filtering on metadata.
-Metadata filtering helps narrow down the search by applying specific conditions such as retrieving documents from a particular source or date range. These two concepts work well together:
-
-1. **Semantic search**: Query the unstructured data directly, often via embedding or keyword similarity.
-2. **Metadata search**: Apply a structured query to the metadata, filtering for specific documents.
-
-Support for metadata filtering depends on the underlying vector store implementation.
-
-Here is example usage with [Pinecone](/docs/integrations/vectorstores/pinecone/#query-directly), filtering for all documents whose metadata key `source` has the value `tweet`.
-
-```python
-vectorstore.similarity_search(
-    "LangChain provides abstractions to make working with LLMs easy",
-    k=2,
-    filter={"source": "tweet"},
-)
-```  
-
-:::info[Further reading]
-
-* See Pinecone's [documentation](https://docs.pinecone.io/guides/data/filter-with-metadata) on filtering with metadata.
-* See the [list of LangChain vectorstore integrations](/docs/integrations/retrievers/self_query/) that support metadata filtering.
-
-:::
-
-## Advanced search and retrieval techniques
-
-While algorithms like HNSW provide the foundation for efficient similarity search in many cases, additional techniques can be employed to improve search quality and diversity.
-For example, [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/) is a re-ranking algorithm used to diversify search results, which is applied after the initial similarity search to ensure a more diverse set of results.
-As a second example, some [vector stores](/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity search, which marries the benefits of both approaches. 
-At the moment, there is no unified way to perform hybrid search using LangChain vectorstores, but it is generally exposed as a keyword argument that is passed in with `similarity_search`.
-See this [how-to guide on hybrid search](/docs/how_to/hybrid/) for more details.
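-
-For example, many vector store integrations expose MMR through a dedicated `max_marginal_relevance_search` method (a sketch; see the table below for when to use each technique):
-
-```python
-# Fetch 20 candidates by similarity, then re-rank down to 4 diverse results
-docs = vectorstore.max_marginal_relevance_search("my query", k=4, fetch_k=20)
-```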
-
-| Name                                                                                                              | When to use                                           | Description                                                                                                                                  |
-|-------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
-| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/)                                            | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. [Paper](https://arxiv.org/abs/2210.11934). |
-| [Maximal Marginal Relevance (MMR)](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html#langchain_pinecone.vectorstores.PineconeVectorStore.max_marginal_relevance_search) | When needing to diversify search results.             | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents.                                        |
-
- 
diff --git a/langchain_md_files/concepts/why_langchain.mdx b/langchain_md_files/concepts/why_langchain.mdx
deleted file mode 100644
index 1713222611c7d0a9cdee5c3c6ec7ecf54e584aff..0000000000000000000000000000000000000000
--- a/langchain_md_files/concepts/why_langchain.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
-# Why LangChain?
-
-The goal of `langchain` the Python package and LangChain the company is to make it as easy as possible for developers to build applications that reason.
-While LangChain originally started as a single open source package, it has evolved into a company and a whole ecosystem.
-This page will talk about the LangChain ecosystem as a whole.
-Most of the components within the LangChain ecosystem can be used by themselves - so if you feel particularly drawn to certain components but not others, that is totally fine! Pick and choose whichever components you like best for your own use case!
-
-## Features
-
-There are several primary needs that LangChain aims to address:
-
-1. **Standardized component interfaces:** The growing number of [models](/docs/integrations/chat/) and [related components](/docs/integrations/vectorstores/) for AI applications has resulted in a wide variety of different APIs that developers need to learn and use.
-This diversity can make it challenging for developers to switch between providers or combine components when building applications.
-LangChain exposes a standard interface for key components, making it easy to switch between providers.
-
-2. **Orchestration:** As applications become more complex, combining multiple components and models, there's [a growing need to efficiently connect these elements into control flows](https://lilianweng.github.io/posts/2023-06-23-agent/) that can [accomplish diverse tasks](https://www.sequoiacap.com/article/generative-ais-act-o1/).
-[Orchestration](https://en.wikipedia.org/wiki/Orchestration_(computing)) is crucial for building such applications.
-
-3. **Observability and evaluation:** As applications become more complex, it becomes increasingly difficult to understand what is happening within them.
-Furthermore, the pace of development can become rate-limited by the [paradox of choice](https://en.wikipedia.org/wiki/Paradox_of_choice).
-For example, developers often wonder how to engineer their prompt or which LLM best balances accuracy, latency, and cost. 
-[Observability](https://en.wikipedia.org/wiki/Observability) and evaluations can help developers monitor their applications and rapidly answer these types of questions with confidence.
-
-
-## Standardized component interfaces
-
-LangChain provides common interfaces for components that are central to many AI applications.
-As an example, all [chat models](/docs/concepts/chat_models/) implement the [BaseChatModel](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface.
-This provides a standard way to interact with chat models, supporting important but often provider-specific features like [tool calling](/docs/concepts/tool_calling/) and [structured outputs](/docs/concepts/structured_outputs/).
-
-
-### Example: chat models 
-
-Many [model providers](/docs/concepts/chat_models/) support [tool calling](/docs/concepts/tool_calling/), a critical feature for many applications (e.g., [agents](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/)), that allows a developer to request model responses that match a particular schema.
-The APIs for each provider differ. 
-LangChain's [chat model](/docs/concepts/chat_models/) interface provides a common way to bind [tools](/docs/concepts/tools) to a model in order to support [tool calling](/docs/concepts/tool_calling/):
-
-```python
-# Tool creation
-tools = [my_tool]
-# Tool binding
-model_with_tools = model.bind_tools(tools)
-```
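-
-The bound model can then be invoked like any chat model, and requested tool calls appear on the response (a minimal sketch):
-
-```python
-response = model_with_tools.invoke("What would you use my_tool for?")
-response.tool_calls  # list of tool calls requested by the model, possibly empty
-```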
-
-Similarly, getting models to produce [structured outputs](/docs/concepts/structured_outputs/) is an extremely common use case. 
-Providers support different approaches for this, including [JSON mode or tool calling](https://platform.openai.com/docs/guides/structured-outputs), with different APIs.
-LangChain's [chat model](/docs/concepts/chat_models/) interface provides a common way to produce structured outputs using the `with_structured_output()` method:
-
-```python
-# Define schema
-schema = ...
-# Bind schema to model
-model_with_structure = model.with_structured_output(schema)
-```
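-
-For example, a Pydantic model can serve as the schema (a minimal sketch):
-
-```python
-from pydantic import BaseModel, Field
-
-class Joke(BaseModel):
-    setup: str = Field(description="The setup of the joke")
-    punchline: str = Field(description="The punchline of the joke")
-
-model_with_structure = model.with_structured_output(Joke)
-joke = model_with_structure.invoke("Tell me a joke about cats")  # returns a Joke instance
-```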
-
-### Example: retrievers
-
-In the context of [RAG](/docs/concepts/rag/) and LLM application components, LangChain's [retriever](/docs/concepts/retrievers/) interface provides a standard way to connect to many different types of data services or databases (e.g., [vector stores](/docs/concepts/vectorstores) or other databases).
-The underlying implementation of the retriever depends on the type of data store or database you are connecting to, but all retrievers implement the [runnable interface](/docs/concepts/runnables/), meaning they can be invoked in a common manner.
-
-```python
-documents = my_retriever.invoke("What is the meaning of life?")
-```
-
-## Orchestration 
-
-While standardization for individual components is useful, we've increasingly seen that developers want to *combine* components into more complex applications. 
-This motivates the need for [orchestration](https://en.wikipedia.org/wiki/Orchestration_(computing)).
-There are several common characteristics of LLM applications that this orchestration layer should support:
-
-* **Complex control flow:** The application requires complex patterns such as cycles (e.g., a loop that reiterates until a condition is met).
-* **[Persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/):** The application needs to maintain [short-term and / or long-term memory](https://langchain-ai.github.io/langgraph/concepts/memory/).
-* **[Human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/):** The application needs human interaction, e.g., pausing, reviewing, editing, approving certain steps.
-
-The recommended way to orchestrate components for complex applications is [LangGraph](https://langchain-ai.github.io/langgraph/concepts/high_level/).
-LangGraph is a library that gives developers a high degree of control by expressing the flow of the application as a set of nodes and edges.
-LangGraph comes with built-in support for [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/), [human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/), [memory](https://langchain-ai.github.io/langgraph/concepts/memory/), and other features.
-It's particularly well suited for building [agents](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/) or [multi-agent](https://langchain-ai.github.io/langgraph/concepts/multi_agent/) applications. 
-Importantly, individual LangChain components can be used as LangGraph nodes, but you can also use LangGraph **without** using LangChain components.
-
-:::info[Further reading]
-
-Have a look at our free course, [Introduction to LangGraph](https://academy.langchain.com/courses/intro-to-langgraph), to learn more about how to use LangGraph to build complex applications.
-
-:::
-
-## Observability and evaluation
-
-The pace of AI application development is often rate-limited by high-quality evaluations because there is a paradox of choice. 
-Developers often wonder how to engineer their prompt or which LLM best balances accuracy, latency, and cost. 
-High quality tracing and evaluations can help you rapidly answer these types of questions with confidence.
-[LangSmith](https://docs.smith.langchain.com/) is our platform that supports observability and evaluation for AI applications.
-See our conceptual guides on [evaluations](https://docs.smith.langchain.com/concepts/evaluation) and [tracing](https://docs.smith.langchain.com/concepts/tracing) for more details.
-
-:::info[Further reading]
-
-See our video playlist on [LangSmith tracing and evaluations](https://youtube.com/playlist?list=PLfaIDFEXuae0um8Fj0V4dHG37fGFU8Q5S&feature=shared) for more details.
-
-:::
-
-## Conclusion
-
-LangChain offers standard interfaces for components that are central to many AI applications, which provides a few specific advantages:
-- **Ease of swapping providers:** It allows you to swap out different component providers without having to change the underlying code.
-- **Advanced features:** It provides common methods for more advanced features, such as [streaming](/docs/concepts/streaming) and [tool calling](/docs/concepts/tool_calling/).
-
-[LangGraph](https://langchain-ai.github.io/langgraph/concepts/high_level/) makes it possible to orchestrate complex applications (e.g., [agents](/docs/concepts/agents/)) and provides features including [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/), [human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/), and [memory](https://langchain-ai.github.io/langgraph/concepts/memory/).
-
-[LangSmith](https://docs.smith.langchain.com/) makes it possible to iterate on your applications with confidence, by providing LLM-specific observability and a framework for testing and evaluating your application.
diff --git a/langchain_md_files/contributing/how_to/code/guidelines.mdx b/langchain_md_files/contributing/how_to/code/guidelines.mdx
deleted file mode 100644
index 54bf05238d1cb93e44b455985d76ac18ac34f74d..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/code/guidelines.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
-# General guidelines
-
-Here are some things to keep in mind for all types of contributions:
-
-- Follow the ["fork and pull request"](https://docs.github.com/en/get-started/exploring-projects-on-github/contributing-to-a-project) workflow.
-- Fill out the checked-in pull request template when opening pull requests. Note related issues and tag relevant maintainers.
-- Ensure your PR passes formatting, linting, and testing checks before requesting a review.
-  - If you would like comments or feedback on your current progress, please open an issue or discussion and tag a maintainer.
-  - See the sections on [Testing](setup.mdx#testing) and [Formatting and Linting](setup.mdx#formatting-and-linting) for how to run these checks locally.
-- Backwards compatibility is key. Your changes must not be breaking, except in the case of critical bug and security fixes.
-- Look for duplicate PRs or issues that have already been opened before opening a new one.
-- Keep scope as isolated as possible. As a general rule, your changes should not affect more than one package at a time.
-
-## Bugfixes
-
-We encourage and appreciate bugfixes. We ask that you:
-
-- Explain the bug in enough detail for maintainers to be able to reproduce it.
-  - If an accompanying issue exists, link to it. Prefix with `Fixes` so that the issue will close automatically when the PR is merged.
-- Avoid breaking changes if possible.
-- Include unit tests that fail without the bugfix.
-
-If you come across a bug and don't know how to fix it, we ask that you open an issue for it describing in detail the environment in which you encountered the bug.
-
-## New features
-
-We aim to keep the bar high for new features. We generally don't accept new core abstractions, changes to infra, changes to dependencies,
-or new agents/chains from outside contributors without an existing GitHub discussion or issue that demonstrates an acute need for them.
-
-- New features must come with docs, unit tests, and (if appropriate) integration tests.
-- New integrations must come with docs, unit tests, and (if appropriate) integration tests.
-  - See [this page](../integrations/index.mdx) for more details on contributing new integrations.
-- New functionality should not inherit from or use deprecated methods or classes.
-- We will reject features that are likely to lead to security vulnerabilities or reports.
-- Do not add any hard dependencies. Integrations may add optional dependencies.
diff --git a/langchain_md_files/contributing/how_to/code/index.mdx b/langchain_md_files/contributing/how_to/code/index.mdx
deleted file mode 100644
index cd3889bed0cf4904f1ef11e6ded662c7899991ae..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/code/index.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
-# Contribute Code
-
-If you would like to add a new feature or update an existing one, please read the resources below before getting started:
-
-- [General guidelines](guidelines.mdx)
-- [Setup](setup.mdx)
diff --git a/langchain_md_files/contributing/how_to/code/setup.mdx b/langchain_md_files/contributing/how_to/code/setup.mdx
deleted file mode 100644
index 6109e4987dcf4e18d3b1c6e9c2e1d646f85c6305..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/code/setup.mdx
+++ /dev/null
@@ -1,192 +0,0 @@
-# Setup
-
-This guide walks through how to run the repository locally and check in your first code.
-For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).
-
-## Dependency Management: `uv` and other env/dependency managers
-
-This project utilizes [uv](https://docs.astral.sh/uv/) v0.5+ as a dependency manager.
-
-Install `uv`: **[documentation on how to install it](https://docs.astral.sh/uv/getting-started/installation/)**.
-
-## Different packages
-
-This repository contains multiple packages:
-- `langchain-core`: Base interfaces for key abstractions as well as logic for combining them in chains (LangChain Expression Language).
-- `langchain-community`: Third-party integrations of various components.
-- `langchain`: Chains, agents, and retrieval logic that makes up the cognitive architecture of your applications.
-- `langchain-experimental`: Components and chains that are experimental, either in the sense that the techniques are novel and still being tested, or they require giving the LLM more access than would be possible in most production systems.
-- Partner integrations: Partner packages in `libs/partners` that are independently version controlled.
-
-Each of these has its own development environment. Docs are run from the top-level makefile, but development
-is split across separate test & release flows.
-
-For this quickstart, start with langchain-community:
-
-```bash
-cd libs/community
-```
-
-## Local Development Dependencies
-
-Install langchain-community development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):
-
-```bash
-uv sync
-```
-
-Then verify dependency installation:
-
-```bash
-make test
-```
-
-## Testing
-
-**Note:** In `langchain`, `langchain-community`, and `langchain-experimental`, some test dependencies are optional. See the following section about optional dependencies.
-
-Unit tests cover modular logic that does not require calls to outside APIs.
-If you add new logic, please add a unit test.
-
-To run unit tests:
-
-```bash
-make test
-```
-
-To run unit tests in Docker:
-
-```bash
-make docker_tests
-```
-
-There are also [integration tests and code-coverage](../testing.mdx) available.
-
-### Only develop langchain_core or langchain_community
-
-If you are only developing `langchain_core` or `langchain_community`, you can simply install the dependencies for the respective projects and run tests:
-
-```bash
-cd libs/core
-make test
-```
-
-Or:
-
-```bash
-cd libs/community
-make test
-```
-
-## Formatting and Linting
-
-Run these locally before submitting a PR; the CI system will also run them.
-
-### Code Formatting
-
-Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules/).
-
-To run formatting for docs, cookbook and templates:
-
-```bash
-make format
-```
-
-To run formatting for a library, run the same command from the relevant library directory:
-
-```bash
-cd libs/{LIBRARY}
-make format
-```
-
-Additionally, you can run the formatter only on the files that have been modified in your current branch as compared to the master branch using the format_diff command:
-
-```bash
-make format_diff
-```
-
-This is especially useful when you have made changes to a subset of the project and want to ensure your changes are properly formatted without affecting the rest of the codebase.
-
-#### Linting
-
-Linting for this project is done via a combination of [ruff](https://docs.astral.sh/ruff/rules/) and [mypy](http://mypy-lang.org/).
-
-To run linting for docs, cookbook and templates:
-
-```bash
-make lint
-```
-
-To run linting for a library, run the same command from the relevant library directory:
-
-```bash
-cd libs/{LIBRARY}
-make lint
-```
-
-In addition, you can run the linter only on the files that have been modified in your current branch as compared to the master branch using the lint_diff command:
-
-```bash
-make lint_diff
-```
-
-This can be very helpful when you've made changes to only certain parts of the project and want to ensure your changes meet the linting standards without having to check the entire codebase.
-
-We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
-
-### Spellcheck
-
-Spellchecking for this project is done via [codespell](https://github.com/codespell-project/codespell).
-Note that `codespell` finds common typos, so it can produce false positives (flagging correctly spelled but rarely used words) and false negatives (missing misspelled words).
-
-To check spelling for this project:
-
-```bash
-make spell_check
-```
-
-To fix spelling in place:
-
-```bash
-make spell_fix
-```
-
-If codespell is incorrectly flagging a word, you can skip spellcheck for that word by adding it to the codespell config in the `pyproject.toml` file.
-
-```toml
-[tool.codespell]
-...
-# Add here:
-ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure'
-```
-
-## Working with Optional Dependencies
-
-`langchain`, `langchain-community`, and `langchain-experimental` rely on optional dependencies to keep these packages lightweight.
-
-`langchain-core` and partner packages **do not use** optional dependencies in this way.
-
-You'll notice that `pyproject.toml` and `uv.lock` are **not** touched when you add optional dependencies below.
-
-If you're adding a new dependency to LangChain, assume that it will be an optional dependency, and
-that most users won't have it installed.
-
-Users who do not have the dependency installed should be able to **import** your code without
-any side effects (no warnings, no errors, no exceptions).
-
-To introduce the dependency to a library, please do the following:
-
-1. Open `extended_testing_deps.txt` and add the dependency.
-2. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
-test makes use of lightweight fixtures to test the logic of the code.
-3. Use the `@pytest.mark.requires(package_name)` decorator for any unit tests that require the dependency, as sketched below.
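-
-A minimal sketch of such a gated unit test (the package name and the imported class below are hypothetical):
-
-```python
-import pytest
-
-@pytest.mark.requires("some_optional_package")
-def test_import() -> None:
-    """At minimum, importing the new code should work when the optional package is installed."""
-    from langchain_community.llms import SomeOptionalLLM  # noqa: F401
-```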
-
-## Adding a Jupyter Notebook
-
-If you are adding a Jupyter Notebook example, you'll want to run with `test` dependencies:
-
-```bash
-uv run --group test jupyter notebook
-```
-
-When you run `uv sync`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
diff --git a/langchain_md_files/contributing/how_to/documentation/index.mdx b/langchain_md_files/contributing/how_to/documentation/index.mdx
deleted file mode 100644
index 53e3cb052717cade08c1c93f9e2508e597ff9c80..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/documentation/index.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
-# Contribute Documentation
-
-Documentation is a vital part of LangChain. We welcome both new documentation for new features and 
-community improvements to our current documentation. Please read the resources below before getting started:
-
-- [Documentation style guide](style_guide.mdx)
-- [Setup](setup.mdx)
diff --git a/langchain_md_files/contributing/how_to/documentation/setup.mdx b/langchain_md_files/contributing/how_to/documentation/setup.mdx
deleted file mode 100644
index d7ad896d74aac4522961d93ddbbee655b48a4c94..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/documentation/setup.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
----
-sidebar_class_name: "hidden"
----
-
-# Setup
-
-LangChain documentation consists of two components:
-
-1. Main Documentation: Hosted at [python.langchain.com](https://python.langchain.com/),
-this comprehensive resource serves as the primary user-facing documentation.
-It covers a wide array of topics, including tutorials, use cases, integrations,
-and more, offering extensive guidance on building with LangChain.
-The content for this documentation lives in the `/docs` directory of the monorepo.
-2. In-code Documentation: This is documentation of the codebase itself, which is also
-used to generate the externally facing [API Reference](https://python.langchain.com/api_reference/langchain/index.html).
-The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
-developers document their code well.
-
-The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
-from the code and is hosted by [Read the Docs](https://readthedocs.org/).
-
-We appreciate all contributions to the documentation, whether it be fixing a typo,
-adding a new tutorial or example, whether in the main documentation or the API Reference.
-
-Similar to linting, we recognize documentation can be annoying. If you do not want
-to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
-
-## 📜 Main Documentation
-
-The content for the main documentation is located in the `/docs` directory of the monorepo.
-
-The documentation is written using a combination of ipython notebooks (`.ipynb` files)
-and markdown (`.mdx` files). The notebooks are converted to markdown
-and then built using [Docusaurus 2](https://docusaurus.io/).
-
-Feel free to make contributions to the main documentation! 🥰
-
-After modifying the documentation:
-
-1. Run the linting and formatting commands (see below) to ensure that the documentation is well-formatted and free of errors.
-2. Optionally build the documentation locally to verify that the changes look good.
-3. Make a pull request with the changes.
-4. You can preview and verify that the changes are what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page. This will take you to a preview of the documentation changes.
-
-## ⚒️ Linting and Building Documentation Locally
-
-After writing up the documentation, you may want to lint and build the documentation
-locally to ensure that it looks good and is free of errors.
-
-If you're unable to build it locally that's okay as well, as you will be able to
-see a preview of the documentation on the pull request page.
-
-
-### Building
-
-The code that builds the documentation is located in the `/docs` directory of the monorepo.
-
-In the following commands, the prefix `api_` indicates that those are operations for the API Reference.
-
-Before building the documentation, it is always a good idea to clean the build directory:
-
-```bash
-make docs_clean
-make api_docs_clean
-```
-
-Next, you can build the documentation as outlined below:
-
-```bash
-make docs_build
-make api_docs_build
-```
-
-:::tip
-
-The `make api_docs_build` command takes a long time. If you're making cosmetic changes to the API docs and want to see how they look, use:
-
-```bash
-make api_docs_quick_preview
-```
-
-which will just build a small subset of the API reference.
-
-:::
-
-Finally, run the link checker to ensure all links are valid:
-
-```bash
-make docs_linkcheck
-make api_docs_linkcheck
-```
-
-### Linting and Formatting
-
-The Main Documentation is linted from the **monorepo root**. To lint the main documentation, run the following from there:
-
-```bash
-make lint
-```
-
-If you have formatting-related errors, you can fix them automatically with:
-
-```bash
-make format
-```
-
-## ⌨️ In-code Documentation
-
-The in-code documentation is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and is hosted by [Read the Docs](https://readthedocs.org/).
-
-For the API reference to be useful, the codebase must be well-documented. This means that all functions, classes, and methods should have a docstring that explains what they do, what the arguments are, and what the return value is. This is a good practice in general, but it is especially important for LangChain because the API reference is the primary resource for developers to understand how to use the codebase.
-
-We generally follow the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.
-
-Here is an example of a well-documented function:
-
-```python
-
-def my_function(arg1: int, arg2: str) -> float:
-    """This is a short description of the function. (It should be a single sentence.)
-
-    This is a longer description of the function. It should explain what
-    the function does, what the arguments are, and what the return value is.
-    It should wrap at 88 characters.
-
-    Examples:
-        This is a section for examples of how to use the function.
-
-        .. code-block:: python
-
-            my_function(1, "hello")
-
-    Args:
-        arg1: This is a description of arg1. We do not need to specify the type since
-            it is already specified in the function signature.
-        arg2: This is a description of arg2.
-
-    Returns:
-        This is a description of the return value.
-    """
-    return 3.14
-```
-
-### Linting and Formatting
-
-The in-code documentation is linted from the directories belonging to the packages
-being documented.
-
-For example, if you're working on the `langchain-community` package, you would change
-the working directory to the `langchain-community` directory:
-
-```bash
-cd [root]/libs/langchain-community
-```
-
-Then you can run the following commands to lint and format the in-code documentation:
-
-```bash
-make format
-make lint
-```
-
-## Verify Documentation Changes
-
-After pushing documentation changes to the repository, you can preview and verify that the changes are
-what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page.
-This will take you to a preview of the documentation changes.
-This preview is created by [Vercel](https://vercel.com/docs/getting-started-with-vercel).
\ No newline at end of file
diff --git a/langchain_md_files/contributing/how_to/documentation/style_guide.mdx b/langchain_md_files/contributing/how_to/documentation/style_guide.mdx
deleted file mode 100644
index 2eb20d685378604ad2b3cd2cc3d0eb0dd304b607..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/documentation/style_guide.mdx
+++ /dev/null
@@ -1,160 +0,0 @@
----
-sidebar_class_name: "hidden"
----
-
-# Documentation Style Guide
-
-As LangChain continues to grow, the amount of documentation required to cover the various concepts and integrations continues to grow too.
-This page provides guidelines for anyone writing documentation for LangChain and outlines some of our philosophies around
-organization and structure.
-
-## Philosophy
-
-LangChain's documentation follows the [Diataxis framework](https://diataxis.fr).
-Under this framework, all documentation falls under one of four categories: [Tutorials](#tutorials),
-[How-to guides](#how-to-guides),
-[References](#references), and [Explanations](#conceptual-guide).
-
-### Tutorials
-
-Tutorials are lessons that take the reader through a practical activity. Their purpose is to help the user
-gain an understanding of concepts and how they interact by showing one way to achieve a specific goal in a hands-on manner. They should **avoid** giving
-multiple permutations of ways to achieve that goal in-depth. Instead, they should guide a new user through a recommended path to accomplish the tutorial's goal. While the end result of a tutorial does not necessarily need to
-be completely production-ready, it should be useful and practically satisfy the goal that is clearly stated in the tutorial's introduction. Information on how to address additional scenarios
-belongs in how-to guides.
-
-To quote the Diataxis website:
-
-> A tutorial serves the user’s *acquisition* of skills and knowledge - their study. Its purpose is not to help the user get something done, but to help them learn.
-
-In LangChain, these are often higher level guides that show off end-to-end use cases.
-
-Some examples include:
-
-- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
-- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
-
-A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).
-  
-Here are some high-level tips on writing a good tutorial:
-
-- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system.
-- Be specific, not abstract and follow one path.
-  - No need to go deeply into alternative approaches, but it’s ok to reference them, ideally with a link to an appropriate how-to guide.
-- Get "a point on the board" as soon as possible - something the user can run that outputs something.
-  - You can iterate and expand afterwards.
-  - Try to frequently checkpoint at given steps where the user can run code and see progress.
-- Focus on results, not technical explanation.
-  - Crosslink heavily to appropriate conceptual/reference pages.
-- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page.
-  - It's also helpful to add a prerequisite callout that links to any pages with necessary background information.
-- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides.
-  
-### How-to guides
-
-A how-to guide, as the name implies, demonstrates how to do something discrete and specific.
-It should assume that the user is already familiar with underlying concepts, and is focused on solving an immediate problem. However,
-it should still provide some background or list certain scenarios where the information may be relevant.
-They can and should discuss alternatives if one approach may be better than another in certain cases.
-
-To quote the Diataxis website:
-
-> A how-to guide serves the work of the already-competent user, whom you can assume to know what they want to do, and to be able to follow your instructions correctly.
-
-Some examples include:
-
-- [How to: return structured data from a model](/docs/how_to/structured_output/)
-- [How to: write a custom chat model](/docs/how_to/custom_chat_model/)
-
-Here are some high-level tips on writing a good how-to guide:
-
-- Clearly explain what you are guiding the user through at the start.
-- Assume higher intent than a tutorial and show what the user needs to do to get that task done.
-- Assume familiarity with concepts, but explain why suggested actions are helpful.
-  - Crosslink heavily to conceptual/reference pages.
-- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem.
-- Use lots of example code.
-  - Prefer full code blocks that the reader can copy and run.
-- End with a recap/next steps section summarizing what the guide covered and future reading, such as other related how-to guides.
-
-### Conceptual guide
-
-LangChain's conceptual guide falls under the **Explanation** quadrant of Diataxis. These guides should cover LangChain terms and concepts
-in a more abstract way than how-to guides or tutorials, targeting curious users interested in
-gaining a deeper understanding and insights of the framework. Try to avoid excessively large code examples as the primary goal is to
-provide perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.
-
-This guide on documentation style is meant to fall under this category.
-
-To quote the Diataxis website:
-
-> The perspective of explanation is higher and wider than that of the other types. It does not take the user’s eye-level view, as in a how-to guide, or a close-up view of the machinery, like reference material. Its scope in each case is a topic - “an area of knowledge”, that somehow has to be bounded in a reasonable, meaningful way.
-
-Some examples include:
-
-- [Retrieval conceptual docs](/docs/concepts/retrieval)
-- [Chat model conceptual docs](/docs/concepts/chat_models)
-
-Here are some high-level tips on writing a good conceptual guide:
-
-- Explain design decisions. Why does concept X exist and why was it designed this way?
-- Use analogies and reference other concepts and alternatives
-- Avoid blending in too much reference content
-- You can and should reference content covered in other guides, but make sure to link to them
-
-### References
-
-References contain detailed, low-level information that describes exactly what functionality exists and how to use it.
-In LangChain, this is mainly our API reference pages, which are populated from docstrings within code.
-References pages are generally not read end-to-end, but are consulted as necessary when a user needs to know
-how to use something specific.
-
-To quote the Diataxis website:
-
-> The only purpose of a reference guide is to describe, as succinctly as possible, and in an orderly way. Whereas the content of tutorials and how-to guides are led by needs of the user, reference material is led by the product it describes.
-
-Many of the reference pages in LangChain are automatically generated from code,
-but here are some high-level tips on writing a good docstring:
-
-- Be concise
-- Discuss special cases and deviations from a user's expectations
-- Go into detail on required inputs and outputs
-- Light details on when one might use the feature are fine, but in-depth details belong in other sections.
-
-Each category serves a distinct purpose and requires a specific approach to writing and structuring the content.
-
-## General guidelines
-
-Here are some other guidelines you should think about when writing and organizing documentation.
-
-We generally do not merge new tutorials from outside contributors without an acute need.
-We welcome updates as well as new integration docs, how-tos, and references.
-
-### Avoid duplication
-
-Multiple pages that cover the same material in depth are difficult to maintain and cause confusion. There should
-be only one (very rarely two) canonical page for a given concept or feature. Instead of duplicating material, link to other guides.
-
-### Link to other sections
-
-Because sections of the docs do not exist in a vacuum, it is important to link to other sections frequently,
-to allow a developer to learn more about an unfamiliar topic within the flow of reading.
-
-This includes linking to the API references and conceptual sections!
-
-### Be concise
-
-In general, take a less-is-more approach. If another section with a good explanation of a concept exists, you should link to it rather than
-re-explain it, unless the concept you are documenting presents some new wrinkle.
-
-Be concise, including in code samples.
-
-### General style
-
-- Use active voice and present tense whenever possible
-- Use examples and code snippets to illustrate concepts and usage
-- Use appropriate header levels (`#`, `##`, `###`, etc.) to organize the content hierarchically
-- Use fewer cells with more code to make copy/paste easier
-- Use bullet points and numbered lists to break down information into easily digestible chunks
-- Use tables (especially for **Reference** sections) and diagrams often to present information visually
-- Include the table of contents for longer documentation pages to help readers navigate the content, but hide it for shorter pages
diff --git a/langchain_md_files/contributing/how_to/index.mdx b/langchain_md_files/contributing/how_to/index.mdx
deleted file mode 100644
index 675586340f427859c31c0bed7ebc961db9a5c248..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/index.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-# How-to Guides
-
-- [**Documentation**](documentation/index.mdx): Help improve our docs, including this one!
-- [**Code**](code/index.mdx): Help us write code, fix bugs, or improve our infrastructure.
-
-## Integrations
-
-- [**Start Here**](integrations/index.mdx): Help us integrate with your favorite vendors and tools.
-- [**Package**](integrations/package): Publish an integration package to PyPi
-- [**Standard Tests**](integrations/standard_tests): Ensure your integration passes an expected set of tests.
diff --git a/langchain_md_files/contributing/how_to/integrations/community.mdx b/langchain_md_files/contributing/how_to/integrations/community.mdx
deleted file mode 100644
index 945ae766f7cd605c23db1070993609703f265be0..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/community.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-pagination_next: null
-pagination_prev: null
----
-## How to add a community integration (not recommended)
-
-:::danger
-
-We recommend following the [main integration guide](./index.mdx) to add new integrations instead.
-
-If you follow this guide, there is a high likelihood we will close your PR with the above
-guide linked without much discussion.
-
-:::
-
-The `langchain-community` package is in `libs/community`.
-
-It can be installed with `pip install langchain-community`, and exported members can be imported with code like 
-
-```python
-from langchain_community.chat_models import ChatParrotLink
-from langchain_community.llms import ParrotLinkLLM
-from langchain_community.vectorstores import ParrotLinkVectorStore
-```
-
-The `community` package relies on manually-installed dependent packages, so you will see errors 
-if you try to import a package that is not installed. In our fake example, if you tried to use `ParrotLinkLLM` without installing `parrot-link-sdk`, you would see an `ImportError` telling you to install it.
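-
-This behavior comes from guarded imports. A minimal sketch of the pattern (not the exact `langchain-community` implementation) looks like this:
-
-```python
-try:
-    import parrot_link_sdk  # the (fake) third-party SDK
-except ImportError as e:
-    raise ImportError(
-        "Could not import parrot-link-sdk. "
-        "Please install it with `pip install parrot-link-sdk`."
-    ) from e
-```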
-
-Let's say we wanted to implement a chat model for Parrot Link AI. We would create a new file in `libs/community/langchain_community/chat_models/parrot_link.py` with the following code:
-
-```python
-from langchain_core.language_models.chat_models import BaseChatModel
-
-class ChatParrotLink(BaseChatModel):
-    """ChatParrotLink chat model.
-
-    Example:
-        .. code-block:: python
-
-            from langchain_community.chat_models import ChatParrotLink
-
-            model = ChatParrotLink()
-    """
-
-    ...
-```
-
-And we would write tests in:
-
-- Unit tests: `libs/community/tests/unit_tests/chat_models/test_parrot_link.py`
-- Integration tests: `libs/community/tests/integration_tests/chat_models/test_parrot_link.py`
-
-And add documentation to:
-
-- `docs/docs/integrations/chat/parrot_link.ipynb`
diff --git a/langchain_md_files/contributing/how_to/integrations/from_template.mdx b/langchain_md_files/contributing/how_to/integrations/from_template.mdx
deleted file mode 100644
index 9365ff23dd5c912320e52dda72d92a6a8e96dcd7..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/from_template.mdx
+++ /dev/null
@@ -1,137 +0,0 @@
----
-pagination_next: null
-pagination_prev: null
----
-
-# How to publish an integration package from a template
-
-:::danger
-This guide is a work-in-progress.
-:::
-
-First, duplicate this template repository: https://github.com/langchain-ai/integration-repo-template
-
-In this guide, we will create a `libs/langchain-parrot-link` folder, simulating the creation
-of a partner package for a fake company, "Parrot Link AI".
-
-A package is 
-installed by users with `pip install langchain-{partner}`, and the package members 
-can be imported with code like:
-
-```python
-from langchain_{partner} import X
-```
-
-## Set up a new package
-
-To set up a new partner package, use the latest version of the LangChain CLI. You can install or update it with:
-
-```bash
-pip install -U langchain-cli
-```
-
-Let's say you want to create a new partner package working for a company called Parrot Link AI.
-
-Then, run the following command to create a new partner package:
-
-```bash
-mkdir libs
-cd libs/
-langchain-cli integration new
-> Name: parrot-link
-> Name of integration in PascalCase [ParrotLink]: ParrotLink
-```
-
-This will create a new package in `libs/parrot-link` with the following structure:
-
-```
-libs/parrot-link/
-  langchain_parrot_link/ # folder containing your package
-    ...
-  tests/
-    ...
-  docs/ # bootstrapped docs notebooks, must be moved to /docs in monorepo root
-    ...
-  scripts/ # scripts for CI
-    ...
-  LICENSE
-  README.md # fill out with information about your package
-  Makefile # default commands for CI
-  pyproject.toml # package metadata, mostly managed by Poetry
-  poetry.lock # package lockfile, managed by Poetry
-  .gitignore
-```
-
-## Implement your package
-
-First, add any dependencies your package needs, such as your company's SDK:
-
-```bash
-poetry add parrot-link-sdk
-```
-
-If you need separate dependencies for type checking, you can add them to the `typing` group with:
-
-```bash
-poetry add --group typing types-parrot-link-sdk
-```
-
-Then, implement your package in `libs/parrot-link/langchain_parrot_link`.
-
-By default, this will include stubs for a Chat Model, an LLM, and/or a Vector Store. You should delete any of the files you won't use and remove them from `__init__.py`.
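-
-For example, if you keep only the chat model, the trimmed `__init__.py` might look like the following sketch (adjust the imports to the files you actually keep):
-
-```python
-"""Top-level package for the langchain-parrot-link integration."""
-
-from langchain_parrot_link.chat_models import ChatParrotLink
-
-__all__ = ["ChatParrotLink"]
-```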
-
-## Write Unit and Integration Tests
-
-Some basic tests are presented in the `tests/` directory. You should add more tests to cover your package's functionality.
-
-For information on running and implementing tests, see the [Testing guide](../testing.mdx).
-
-## Write documentation
-
-Documentation is generated from Jupyter notebooks in the `docs/` directory. You should place the notebooks with examples
-in the relevant `docs/docs/integrations` directory in the monorepo root.
-
-## (If Necessary) Deprecate community integration
-
-Note: this is only necessary if you're migrating an existing community integration into 
-a partner package. If the component you're integrating is net-new to LangChain (i.e. 
-not already in the `community` package), you can skip this step.
-
-Let's pretend we migrated our `ChatParrotLink` chat model from the community package to 
-the partner package. We would need to deprecate the old model in the community package.
-
-We would do that by adding a `@deprecated` decorator to the old model as follows, in
-`libs/community/langchain_community/chat_models/parrot_link.py`.
-
-Before our change, our chat model might look like this:
-
-```python
-class ChatParrotLink(BaseChatModel):
-  ...
-```
-
-After our change, it would look like this:
-
-```python
-from langchain_core._api.deprecation import deprecated
-
-@deprecated(
-    since="0.0.<next community version>", 
-    removal="1.0.0", 
-    alternative_import="langchain_parrot_link.ChatParrotLink"
-)
-class ChatParrotLink(BaseChatModel):
-  ...
-```
-
-You should do this for *each* component that you're migrating to the partner package.
-
-## Additional steps
-
-Contributor steps:
-
-- [ ] Add secret names to manual integrations workflow in `.github/workflows/_integration_test.yml`
-- [ ] Add secrets to release workflow (for pre-release testing) in `.github/workflows/_release.yml`
-- [ ] Set up PyPi and Test PyPi projects
-- [ ] Add credential secrets to GitHub Actions
-- [ ] Add package to conda-forge
diff --git a/langchain_md_files/contributing/how_to/integrations/index.mdx b/langchain_md_files/contributing/how_to/integrations/index.mdx
deleted file mode 100644
index 3623f621b7f3d30f84e216b6e951ab6af2e6d55f..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/index.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-pagination_prev: null
-pagination_next: contributing/how_to/integrations/package
----
-
-# Contribute Integrations
-
-Integrations are a core component of LangChain.
-LangChain provides standard interfaces for several different components (language models, vector stores, etc) that are crucial when building LLM applications.
-
-
-## Why contribute an integration to LangChain?
-
-- **Discoverability:** LangChain is the most used framework for building LLM applications, with over 20 million monthly downloads. LangChain integrations are discoverable by a large community of GenAI builders.
-- **Interoperability:** LangChain components expose a standard interface, allowing developers to easily swap them for each other. If you implement a LangChain integration, any developer using a different component will easily be able to swap yours in.
-- **Best Practices:** Through their standard interface, LangChain components encourage and facilitate best practices (streaming, async, etc)
-
-
-## Components to Integrate
-
-:::info
-
-See the [Conceptual Guide](../../../concepts/index.mdx) for an overview of all components
-supported in LangChain
-
-:::
-
-While any component can be integrated into LangChain, there are specific types of integrations we encourage more:
-
-<table>
-  <tr>
-    <th>Integrate these ✅</th>
-    <th>Not these ❌</th>
-  </tr>
-  <tr>
-    <td>
-      <ul>
-        <li>Chat Models</li>
-        <li>Tools/Toolkits</li>
-        <li>Retrievers</li>
-        <li>Vector Stores</li>
-        <li>Embedding Models</li>
-      </ul>
-    </td>
-    <td>
-      <ul>
-        <li>LLMs (Text-Completion Models)</li>
-        <li>Document Loaders</li>
-        <li>Key-Value Stores</li>
-        <li>Document Transformers</li>
-        <li>Model Caches</li>
-        <li>Graphs</li>
-        <li>Message Histories</li>
-        <li>Callbacks</li>
-        <li>Chat Loaders</li>
-        <li>Adapters</li>
-        <li>Other abstractions</li>
-      </ul>
-    </td>
-  </tr>
-</table>
-
-## How to contribute an integration
-
-In order to contribute an integration, you should follow these steps:
-
-1. Confirm that your integration is in the [list of components](#components-to-integrate) we are currently encouraging.
-2. [Implement your package](/docs/contributing/how_to/integrations/package/) and publish it to a public GitHub repository.
-3. [Implement the standard tests](./standard_tests) for your integration and successfully run them.
-4. [Publish your integration](./publish.mdx) by publishing the package to PyPi and adding docs in the `docs/docs/integrations` directory of the LangChain monorepo.
-5. [Optional] Open and merge a PR to add documentation for your integration to the official LangChain docs.
-6. [Optional] Engage with the LangChain team for joint co-marketing ([see below](#co-marketing)).
-
-## Co-Marketing
-
-With over 20 million monthly downloads, LangChain has a large audience of developers building LLM applications.
-Besides just adding integrations, we also like to show them examples of cool tools or APIs they can use.
-
-While traditionally called "co-marketing", we like to think of this more as "co-education".
-For that reason, while we are happy to highlight your integration through our social media channels, we prefer to highlight examples that also serve some educational purpose.
-Our main social media channels are Twitter and LinkedIn.
-
-Here are some heuristics for types of content we are excited to promote:
-
-- **Integration announcement:** If you announce the integration with a link to the LangChain documentation page, we are happy to re-tweet/re-share on Twitter/LinkedIn.
-- **Educational content:** We highlight good educational content on the weekends - if you write a good blog or make a good YouTube video, we are happy to share there! Note that we prefer content that is NOT framed as "here's how to use integration XYZ", but rather "here's how to do ABC", as we find that is more educational and helpful for developers.
-- **End-to-end applications:** End-to-end applications are great resources for developers looking to build. We prefer to highlight applications that are more complex/agentic in nature, and that use [LangGraph](https://github.com/langchain-ai/langgraph) as the orchestration framework. We get particularly excited about anything involving long-term memory, human-in-the-loop interaction patterns, or multi-agent architectures.
-- **Research:** We love highlighting novel research! Whether it is research built on top of LangChain or that integrates with it.
-
-## Further Reading
-To get started, let's learn [how to implement an integration package](/docs/contributing/how_to/integrations/package/) for LangChain.
diff --git a/langchain_md_files/contributing/how_to/integrations/package.mdx b/langchain_md_files/contributing/how_to/integrations/package.mdx
deleted file mode 100644
index e824a0ffa266f8c29b68c19cd649966135922f72..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/package.mdx
+++ /dev/null
@@ -1,474 +0,0 @@
----
-pagination_next: contributing/how_to/integrations/standard_tests
-pagination_prev: contributing/how_to/integrations/index
----
-# How to implement an integration package
-
-This guide walks through the process of implementing a LangChain integration 
-package.
-
-Integration packages are just Python packages that can be installed with `pip install <your-package>`,
-which contain classes that are compatible with LangChain's core interfaces.
-
-We will cover:
-
-1. (Optional) How to bootstrap a new integration package
-2. How to implement components, such as [chat models](/docs/concepts/chat_models/) and [vector stores](/docs/concepts/vectorstores/), that adhere
-to the LangChain interface.
-
-## (Optional) bootstrapping a new integration package
-
-In this section, we will outline 2 options for bootstrapping a new integration package, 
-and you're welcome to use other tools if you prefer!
-
-1. **langchain-cli**: This is a command-line tool that can be used to bootstrap a new integration package with a template for LangChain components and Poetry for dependency management.
-2. **Poetry**: This is a Python dependency management tool that can be used to bootstrap a new Python package with dependencies. You can then add LangChain components to this package.
-
-<details>
-    <summary>Option 1: langchain-cli (recommended)</summary>
-
-In this guide, we will be using the `langchain-cli` to create a new integration package
-from a template, which can be edited to implement your LangChain components.
-
-### **Prerequisites**
-
-- [GitHub](https://github.com) account
-- [PyPi](https://pypi.org/) account
-
-### Bootstrapping a new Python package with langchain-cli
-
-First, install `langchain-cli` and `poetry`:
-
-```bash
-pip install langchain-cli poetry
-```
-
-Next, come up with a name for your package. For this guide, we'll use `langchain-parrot-link`.
-You can confirm that the name is available on PyPi by searching for it on the [PyPi website](https://pypi.org/).
-
-Next, create your new Python package with `langchain-cli`, and navigate into the new directory with `cd`:
-
-```bash
-langchain-cli integration new
-
-> The name of the integration to create (e.g. `my-integration`): parrot-link
-> Name of integration in PascalCase [ParrotLink]:
-
-cd parrot-link
-```
-
-Next, let's add any dependencies we need:
-
-```bash
-poetry add my-integration-sdk
-```
-
-We can also add some `typing` or `test` dependencies in a separate poetry dependency group.
-
-```bash
-poetry add --group typing my-typing-dep
-poetry add --group test my-test-dep
-```
-
-And finally, have poetry set up a virtual environment with your dependencies, as well
-as your integration package:
-
-```bash
-poetry install --with lint,typing,test,test_integration
-```
-
-You now have a new Python package with a template for LangChain components! This
-template comes with files for each integration type, and you're welcome to duplicate or
-delete any of these files as needed (including the associated test files).
-
-To create any individual files from the template, you can run, e.g.:
-
-```bash
-langchain-cli integration new \
-    --name parrot-link \
-    --name-class ParrotLink \
-    --src integration_template/chat_models.py \
-    --dst langchain_parrot_link/chat_models_2.py
-```
-
-</details>
-
-<details>
-    <summary>Option 2: Poetry (manual)</summary>
-
-In this guide, we will be using [Poetry](https://python-poetry.org/) for
-dependency management and packaging, and you're welcome to use any other tools you prefer.
-
-### **Prerequisites**
-
-- [GitHub](https://github.com) account
-- [PyPi](https://pypi.org/) account
-
-### Bootstrapping a new Python package with Poetry
-
-First, install Poetry:
-
-```bash
-pip install poetry
-```
-
-Next, come up with a name for your package. For this guide, we'll use `langchain-parrot-link`.
-You can confirm that the name is available on PyPi by searching for it on the [PyPi website](https://pypi.org/).
-
-Next, create your new Python package with Poetry, and navigate into the new directory with `cd`:
-
-```bash
-poetry new langchain-parrot-link
-cd langchain-parrot-link
-```
-
-Add main dependencies using Poetry, which will add them to your `pyproject.toml` file:
-
-```bash
-poetry add langchain-core
-```
-
-We will also add some `test` dependencies in a separate poetry dependency group. If
-you are not using Poetry, we recommend adding these in a way that won't package them
-with your published package, or just installing them separately when you run tests.
-
-`langchain-tests` will provide the [standard tests](../standard_tests) we will use later. 
-We recommend pinning these to the latest version: <img src="https://img.shields.io/pypi/v/langchain-tests" style={{position:"relative",top:4,left:3}} />
-
-Note: Replace `<latest_version>` with the latest version of `langchain-tests` below.
-
-```bash
-poetry add --group test pytest pytest-socket pytest-asyncio langchain-tests==<latest_version>
-```
-
-And finally, have poetry set up a virtual environment with your dependencies, as well
-as your integration package:
-
-```bash
-poetry install --with test
-```
-
-You're now ready to start writing your integration package!
-
-### Writing your integration
-
-Let's say you're building a simple integration package that provides a `ChatParrotLink`
-chat model integration for LangChain. Here's a simple example of what your project
-structure might look like:
-
-```plaintext
-langchain-parrot-link/
-├── langchain_parrot_link/
-│   ├── __init__.py
-│   └── chat_models.py
-├── tests/
-│   ├── __init__.py
-│   └── test_chat_models.py
-├── pyproject.toml
-└── README.md
-```
-
-All of these files should already exist from step 1, except for 
-`chat_models.py` and `test_chat_models.py`! We will implement `test_chat_models.py` 
-later, following the [standard tests](../standard_tests) guide.
-
-For `chat_models.py`, simply paste the contents of the chat model implementation
-[below](#implementing-langchain-components).
-
-</details>
-
-### Push your package to a public Github repository
-
-This is only required if you want to publish your integration in the LangChain documentation.
-
-1. Create a new repository on GitHub.
-2. Push your code to the repository.
-3. Confirm that your repository is viewable by the public (e.g. in a private browsing window, where you're not logged into GitHub).
-
-## Implementing LangChain components
-
-LangChain components are subclasses of base classes in [langchain-core](/docs/concepts/architecture/#langchain-core).
-Examples include [chat models](/docs/concepts/chat_models/),
-[vector stores](/docs/concepts/vectorstores/), [tools](/docs/concepts/tools/),
-[embedding models](/docs/concepts/embedding_models/) and [retrievers](/docs/concepts/retrievers/).
-
-Your integration package will typically implement a subclass of at least one of these
-components. Expand the tabs below to see details on each.
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import CodeBlock from '@theme/CodeBlock';
-
-<Tabs>
-
-    <TabItem value="chat_models" label="Chat models">
-        
-        Refer to the [Custom Chat Model Guide](/docs/how_to/custom_chat_model) for
-        detail on a starter chat model [implementation](/docs/how_to/custom_chat_model/#implementation).
-
-        You can start from the following template or langchain-cli command:
-
-        ```bash
-        langchain-cli integration new \
-            --name parrot-link \
-            --name-class ParrotLink \
-            --src integration_template/chat_models.py \
-            --dst langchain_parrot_link/chat_models.py
-        ```
-
-        <details>
-            <summary>Example chat model code</summary>
-
-import ChatModelSource from '../../../../src/theme/integration_template/integration_template/chat_models.py';
-
-        <CodeBlock language="python" title="langchain_parrot_link/chat_models.py">
-            {
-                ChatModelSource.replaceAll('__ModuleName__', 'ParrotLink')
-                    .replaceAll('__package_name__', 'langchain-parrot-link')
-                    .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-                    .replaceAll('__module_name__', 'langchain_parrot_link')
-            }
-        </CodeBlock>
-
-        </details>
-
-    </TabItem>
-    <TabItem value="vector_stores" label="Vector stores">
-
-        Your vector store implementation will depend on your chosen database technology.
-        `langchain-core` includes a minimal
-        [in-memory vector store](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.in_memory.InMemoryVectorStore.html)
-        that we can use as a guide. You can access the code [here](https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/vectorstores/in_memory.py).
-
-        All vector stores must inherit from the [VectorStore](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.base.VectorStore.html)
-        base class. This interface consists of methods for writing, deleting and searching
-        for documents in the vector store.
-
-        `VectorStore` supports a variety of synchronous and asynchronous search types (e.g., 
-        nearest-neighbor or maximum marginal relevance), as well as interfaces for adding
-        documents to the store. See the [API Reference](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.base.VectorStore.html)
-        for all supported methods. The required methods are tabulated below:
-
-        | Method/Property         | Description                                          |
-        |------------------------ |------------------------------------------------------|
-        | `add_documents`         | Add documents to the vector store.                   |
-        | `delete`                | Delete selected documents from vector store (by IDs) |
-        | `get_by_ids`            | Get selected documents from vector store (by IDs)    |
-        | `similarity_search`     | Get documents most similar to a query.               |
-        | `embeddings` (property) | Embeddings object for vector store.                  |
-        | `from_texts`            | Instantiate vector store via adding texts.           |
-
-        Note that `InMemoryVectorStore` implements some optional search types, as well as
-        convenience methods for loading and dumping the object to a file, but this is not
-        necessary for all implementations.
-
-        :::tip
-
-        The [in-memory vector store](https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/vectorstores/in_memory.py)
-        is tested against the standard tests in the LangChain Github repository.
-
-        :::
-
-        <details>
-            <summary>Example vector store code</summary>
-
-import VectorstoreSource from '../../../../src/theme/integration_template/integration_template/vectorstores.py';
-
-        <CodeBlock language="python" title="langchain_parrot_link/vectorstores.py">
-            {
-                VectorstoreSource.replaceAll('__ModuleName__', 'ParrotLink')
-                    .replaceAll('__package_name__', 'langchain-parrot-link')
-                    .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-                    .replaceAll('__module_name__', 'langchain_parrot_link')
-            }
-        </CodeBlock>
-
-        </details>
-
-    </TabItem>
-    <TabItem value="embeddings" label="Embeddings">
-
-Embeddings are used to convert `str` objects from `Document.page_content` fields
-into a vector representation (represented as a list of floats).
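-
-The interface itself is just two methods, `embed_documents` and `embed_query`. A deliberately tiny, deterministic sketch of the interface (not the template implementation linked below):
-
-```python
-from langchain_core.embeddings import Embeddings
-
-
-class ParrotLinkEmbeddings(Embeddings):
-    """Toy embeddings that hash characters into a fixed-size vector."""
-
-    def __init__(self, size: int = 8) -> None:
-        self.size = size
-
-    def embed_documents(self, texts: list[str]) -> list[list[float]]:
-        return [self.embed_query(text) for text in texts]
-
-    def embed_query(self, text: str) -> list[float]:
-        vector = [0.0] * self.size
-        for i, char in enumerate(text):
-            vector[i % self.size] += float(ord(char))
-        return vector
-```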
-
-Refer to the [Custom Embeddings Guide](/docs/how_to/custom_embeddings) for
-detail on a starter embeddings [implementation](/docs/how_to/custom_embeddings/#implementation).
-
-You can start from the following template or langchain-cli command:
-
-```bash
-langchain-cli integration new \
-    --name parrot-link \
-    --name-class ParrotLink \
-    --src integration_template/embeddings.py \
-    --dst langchain_parrot_link/embeddings.py
-```
-
-        <details>
-            <summary>Example embeddings code</summary>
-
-import EmbeddingsSource from '/src/theme/integration_template/integration_template/embeddings.py';
-
-        <CodeBlock language="python" title="langchain_parrot_link/embeddings.py">
-            {
-                EmbeddingsSource.replaceAll('__ModuleName__', 'ParrotLink')
-                    .replaceAll('__package_name__', 'langchain-parrot-link')
-                    .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-                    .replaceAll('__module_name__', 'langchain_parrot_link')
-            }
-        </CodeBlock>
-
-        </details>
-
-    </TabItem>
-    <TabItem value="tools" label="Tools">
-
-Tools are used in 2 main ways:
-
-1. To define an "input schema" or "args schema" to pass to a chat model's tool calling
-feature along with a text request, such that the chat model can generate a "tool call",
-or parameters to call the tool with.
-2. To take a "tool call" as generated above, and take some action and return a response
-that can be passed back to the chat model as a ToolMessage.
-
-Tool classes must inherit from the [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html#langchain_core.tools.base.BaseTool) base class. This interface has 3 properties and 2 methods that should be implemented in a 
-subclass.
-
-| Method/Property         | Description                                          |
-|------------------------ |------------------------------------------------------|
-| `name`                  | Name of the tool (passed to the LLM too).            |
-| `description`           | Description of the tool (passed to the LLM too).     |
-| `args_schema`           | Define the schema for the tool's input arguments.    |
-| `_run`                  | Run the tool with the given arguments.               |
-| `_arun`                 | Asynchronously run the tool with the given arguments.|
-
-### Properties
-
-`name`, `description`, and `args_schema` are all properties that should be implemented
-in the subclass. `name` and `description` are strings that are used to identify the tool
-and provide a description of what the tool does. Both of these are passed to the LLM,
-and users may override these values depending on the LLM they are using as a form of
-"prompt engineering." Giving these a concise and LLM-usable name and description is
-important for the initial user experience of the tool.
-
-`args_schema` is a Pydantic `BaseModel` that defines the schema for the tool's input
-arguments. This is used to validate the input arguments to the tool, and to provide
-a schema for the LLM to fill out when calling the tool. Similar to the `name` and
-`description` of the overall Tool class, the fields' names (the variable name) and
-description (part of `Field(..., description="description")`) are passed to the LLM, 
-and the values in these fields should be concise and LLM-usable.
-
-### Run Methods
-
-`_run` is the main method that should be implemented in the subclass. This method
-takes in the arguments from `args_schema` and runs the tool, returning a string
-response. This method is usually called in a LangGraph [`ToolNode`](https://langchain-ai.github.io/langgraph/how-tos/tool-calling/), and can also be called in a legacy
-`langchain.agents.AgentExecutor`.
-
-`_arun` is optional because by default, `_run` will be run in an async executor.
-However, if your tool is calling any APIs or doing any async work, you should implement
-this method to run the tool asynchronously in addition to `_run`.
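-
-Putting the pieces together, a minimal (hypothetical) tool might look like the following sketch; the full template linked below is more complete:
-
-```python
-from typing import Type
-
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
-
-
-class ParrotMultiplyInput(BaseModel):
-    """Input schema; the field descriptions are passed to the LLM."""
-
-    a: int = Field(..., description="First number to multiply.")
-    b: int = Field(..., description="Second number to multiply.")
-
-
-class ParrotMultiplyTool(BaseTool):
-    """Toy tool that multiplies two integers."""
-
-    name: str = "parrot_multiply"
-    description: str = "Multiply two integers and return the product."
-    args_schema: Type[BaseModel] = ParrotMultiplyInput
-
-    def _run(self, a: int, b: int) -> str:
-        return str(a * b)
-```
-
-Invoking `ParrotMultiplyTool().invoke({"a": 2, "b": 3})` would then return `"6"`.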
-
-### Implementation
-
-You can start from the following template or langchain-cli command:
-
-```bash
-langchain-cli integration new \
-    --name parrot-link \
-    --name-class ParrotLink \
-    --src integration_template/tools.py \
-    --dst langchain_parrot_link/tools.py
-```
-
-        <details>
-            <summary>Example tool code</summary>
-
-import ToolSource from '/src/theme/integration_template/integration_template/tools.py';
-
-        <CodeBlock language="python" title="langchain_parrot_link/tools.py">
-            {
-                ToolSource.replaceAll('__ModuleName__', 'ParrotLink')
-                    .replaceAll('__package_name__', 'langchain-parrot-link')
-                    .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-                    .replaceAll('__module_name__', 'langchain_parrot_link')
-            }
-        </CodeBlock>
-
-        </details>
-
-    </TabItem>
-    <TabItem value="retrievers" label="Retrievers">
-
-Retrievers are used to retrieve documents from APIs, databases, or other sources
-based on a query. The `Retriever` class must inherit from the [BaseRetriever](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) base class. This interface has 1 attribute and 2 methods that should be implemented in a subclass.
-
-| Method/Property         | Description                                          |
-|------------------------ |------------------------------------------------------|
-| `k`                     | Default number of documents to retrieve (configurable). |
-| `_get_relevant_documents`| Retrieve documents based on a query.                 |
-| `_aget_relevant_documents`| Asynchronously retrieve documents based on a query.  |
-
-### Attributes
-
-`k` is an attribute that should be implemented in the subclass. This attribute
-can simply be defined at the top of the class with a default value like
-`k: int = 5`. This attribute is the default number of documents to retrieve
-from the retriever, and can be overridden by the user when constructing or calling
-the retriever.
-
-### Methods
-
-`_get_relevant_documents` is the main method that should be implemented in the subclass.
-
-This method takes in a query and returns a list of `Document` objects, which have 2
-main properties:
-
-- `page_content` - the text content of the document
-- `metadata` - a dictionary of metadata about the document
-
-Retrievers are typically directly invoked by a user, e.g. as
-`MyRetriever(k=4).invoke("query")`, which will automatically call `_get_relevant_documents`
-under the hood.
-
-`_aget_relevant_documents` is optional because by default, `_get_relevant_documents` will
-be run in an async executor. However, if your retriever is calling any APIs or doing
-any async work, you should implement this method to run the retriever asynchronously
-in addition to `_get_relevant_documents` for performance reasons.
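-
-A minimal (hypothetical) retriever that filters an in-memory list of documents might look like this sketch; the full template linked below is more complete:
-
-```python
-from typing import List
-
-from langchain_core.callbacks import CallbackManagerForRetrieverRun
-from langchain_core.documents import Document
-from langchain_core.retrievers import BaseRetriever
-
-
-class ParrotLinkRetriever(BaseRetriever):
-    """Toy retriever that returns documents containing the query string."""
-
-    documents: List[Document]
-    k: int = 5
-
-    def _get_relevant_documents(
-        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
-    ) -> List[Document]:
-        # A real implementation would call an API or query a database here.
-        matches = [
-            doc for doc in self.documents if query.lower() in doc.page_content.lower()
-        ]
-        return matches[: self.k]
-```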
-
-### Implementation
-
-You can start from the following template or langchain-cli command:
-
-```bash
-langchain-cli integration new \
-    --name parrot-link \
-    --name-class ParrotLink \
-    --src integration_template/retrievers.py \
-    --dst langchain_parrot_link/retrievers.py
-```
-
-        <details>
-            <summary>Example retriever code</summary>
-
-import RetrieverSource from '/src/theme/integration_template/integration_template/retrievers.py';
-
-        <CodeBlock language="python" title="langchain_parrot_link/retrievers.py">
-            {
-                RetrieverSource.replaceAll('__ModuleName__', 'ParrotLink')
-                    .replaceAll('__package_name__', 'langchain-parrot-link')
-                    .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-                    .replaceAll('__module_name__', 'langchain_parrot_link')
-            }
-        </CodeBlock>
-
-        </details>
-
-    </TabItem>
-</Tabs>
-
----
-
-## Next Steps
-
-Now that you've implemented your package, you can move on to [adding the standard tests](../standard_tests) for your integration and running them.
diff --git a/langchain_md_files/contributing/how_to/integrations/publish.mdx b/langchain_md_files/contributing/how_to/integrations/publish.mdx
deleted file mode 100644
index 36aa24f387bacdfa8dfeabd75a4e85e834158266..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/publish.mdx
+++ /dev/null
@@ -1,150 +0,0 @@
----
-pagination_prev: contributing/how_to/integrations/standard_tests
-pagination_next: null
----
-
-# Publishing your package
-
-Now that your package is implemented and tested, you can:
-
-1. Publish your package to PyPi
-2. Add documentation for your package to the LangChain Monorepo
-
-## Publishing your package to PyPi
-
-This guide assumes you have already implemented your package and written tests for it. If you haven't done that yet, please refer to the [implementation guide](../package) and the [testing guide](../standard_tests).
-
-Note that Poetry is not required to publish a package to PyPi, and we're using it in this guide end-to-end for convenience.
-You are welcome to publish your package using any other method you prefer.
-
-First, make sure you have a PyPi account and have logged in with Poetry:
-
-<details>
-    <summary>How to create a PyPi Token</summary>
-
-1. Go to the [PyPi website](https://pypi.org/) and create an account.
-2. Verify your email address by clicking the link that PyPi emails to you.
-3. Go to your account settings and click "Generate Recovery Codes" to enable 2FA. PyPi currently **requires** 2FA to be enabled before you can generate an API token.
-4. Go to your account settings and [generate a new API token](https://pypi.org/manage/account/token/).
-
-</details>
-
-```bash
-poetry config pypi-token.pypi <your-pypi-token>
-```
-
-Next, build your package:
-
-```bash
-poetry build
-```
-
-Finally, publish your package to PyPi:
-
-```bash
-poetry publish
-```
-
-You're all set! Your package is now available on PyPi and can be installed with `pip install langchain-parrot-link`.
-
-## Adding documentation to the LangChain Monorepo
-
-To add documentation for your package to the LangChain Monorepo, you will need to:
-
-1. Fork and clone the LangChain Monorepo
-2. Make a "Provider Page" at `docs/docs/integrations/providers/<your-package-name>.ipynb`
-3. Make "Component Pages" at `docs/docs/integrations/<component-type>/<your-package-name>.ipynb`
-4. Register your package in `libs/packages.yml`
-5. Submit a PR with **only these changes** to the LangChain Monorepo
-
-### Fork and clone the LangChain Monorepo
-
-First, fork the [LangChain Monorepo](https://github.com/langchain-ai/langchain) to your GitHub account.
-
-Next, clone the repository to your local machine:
-
-```bash
-git clone https://github.com/<your-username>/langchain.git
-```
-
-You're now ready to make your PR!
-
-### Bootstrap your documentation pages with the langchain-cli (recommended)
-
-To make it easier to create the necessary documentation pages, you can use the `langchain-cli` to bootstrap them for you.
-
-First, install the latest version of the `langchain-cli` package:
-
-```bash
-pip install --upgrade langchain-cli
-```
-
-To see the available commands to bootstrap your documentation pages, run:
-
-```bash
-langchain-cli integration create-doc --help
-```
-
-Let's bootstrap a provider page from the root of the monorepo:
-
-```bash
-langchain-cli integration create-doc \
-    --component-type Provider \
-    --destination-dir docs/docs/integrations/providers \
-    --name parrot-link \
-    --name-class ParrotLink
-```
-
-And a chat model component page:
-
-```bash
-langchain-cli integration create-doc \
-    --component-type ChatModel \
-    --destination-dir docs/docs/integrations/chat \
-    --name parrot-link \
-    --name-class ParrotLink
-```
-
-And a vector store component page:
-
-```bash
-langchain-cli integration create-doc \
-    --component-type VectorStore \
-    --destination-dir docs/docs/integrations/vectorstores \
-    --name parrot-link \
-    --name-class ParrotLink
-```
-
-These commands will create the following 3 files, which you should fill out with information about your package:
-
-- `docs/docs/integrations/providers/parrot_link.ipynb`
-- `docs/docs/integrations/chat/parrot_link.ipynb`
-- `docs/docs/integrations/vectorstores/parrot_link.ipynb`
-
-### Manually create your documentation pages (if you prefer)
-
-If you prefer to create the documentation pages manually, you can create the same files listed
-above and fill them out with information about your package.
-
-You can view the templates that the CLI uses to create these files [here](https://github.com/langchain-ai/langchain/tree/master/libs/cli/langchain_cli/integration_template/docs) if helpful!
-
-### Register your package in `libs/packages.yml`
-
-Finally, add your package to the end of the `libs/packages.yml` file in the LangChain Monorepo.
-
-```yaml
-packages:
-  - name: langchain-parrot-link
-    repo: <your github handle>/<your repo>
-    path: .
-```
-
-For `path`, you can use `.` if your package is in the root of your repository, or specify a subdirectory (e.g. `libs/parrot-link`) if it is in a subdirectory.
-
-If you followed the [package bootstrapping guide](../package), then your path is `.`.
-
-### Submit a PR with your changes
-
-Once you have completed these steps, you can submit a PR to the LangChain Monorepo with **only these changes**.
-
-If you have additional changes to request, please submit them in a separate PR.
diff --git a/langchain_md_files/contributing/how_to/integrations/standard_tests.mdx b/langchain_md_files/contributing/how_to/integrations/standard_tests.mdx
deleted file mode 100644
index 5462b966a6404ff9acc00035b4ff5e45d596d0e3..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/integrations/standard_tests.mdx
+++ /dev/null
@@ -1,393 +0,0 @@
----
-pagination_next: contributing/how_to/integrations/publish
-pagination_prev: contributing/how_to/integrations/package
----
-# How to add standard tests to an integration
-
-When creating either a custom class for yourself or to publish in a LangChain integration, it is important to add standard tests to ensure it works as expected. This guide will show you how to add standard tests to each integration type.
-
-## Setup
-
-First, let's install 2 dependencies:
-
-- `langchain-core` will define the interfaces we want to import to define our custom tool.
-- `langchain-tests` will provide the standard tests we want to use, as well as pytest plugins necessary to run them. We recommend pinning to the latest version: <img src="https://img.shields.io/pypi/v/langchain-tests" style={{position:"relative",top:4,left:3}} />
-
-:::note
-
-Because added tests in new versions of `langchain-tests` can break your CI/CD pipelines, we recommend pinning the 
-version of `langchain-tests` to avoid unexpected changes.
-
-:::
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-<Tabs>
-    <TabItem value="poetry" label="Poetry" default>
-If you followed the [previous guide](../package), you should already have these dependencies installed!
-
-```bash
-poetry add langchain-core
-poetry add --group test langchain-tests==<latest_version>
-poetry install --with test
-```
-    </TabItem>
-    <TabItem value="pip" label="Pip">
-```bash
-pip install -U langchain-core langchain-tests
-
-# install current package in editable mode
-pip install --editable .
-```
-    </TabItem>
-</Tabs>
-
-## Add and configure standard tests
-
-There are 2 namespaces in the `langchain-tests` package: 
-
-- [unit tests](../../../concepts/testing.mdx#unit-tests) (`langchain_tests.unit_tests`): designed to be used to test the component in isolation and without access to external services
-- [integration tests](../../../concepts/testing.mdx#integration-tests) (`langchain_tests.integration_tests`): designed to be used to test the component with access to external services (in particular, the external service that the component is designed to interact with).
-
-Both types of tests are implemented as [`pytest` class-based test suites](https://docs.pytest.org/en/7.1.x/getting-started.html#group-multiple-tests-in-a-class).
-
-By subclassing the base classes for each type of standard test (see below), you get all of the standard tests for that type, and you
-can override the properties that the test suite uses to configure the tests.
-
-In order to run the tests in the same way as this guide, we recommend subclassing these
-classes in test files under two test subdirectories:
-
-- `tests/unit_tests` for unit tests
-- `tests/integration_tests` for integration tests
-
-### Implementing standard tests
-
-import CodeBlock from '@theme/CodeBlock';
-
-In the following tabs, we show how to implement the standard tests for
-each component type:
-
-<Tabs>
-
-    <TabItem value="chat_models" label="Chat models">
-
-To configure standard tests for a chat model, we subclass `ChatModelUnitTests` and `ChatModelIntegrationTests`. On each subclass, we override the following `@property` methods to specify the chat model to be tested and the chat model's configuration:
-
-| Property | Description |
-| --- | --- |
-| `chat_model_class` | The class for the chat model to be tested |
-| `chat_model_params` | The parameters to pass to the chat model's constructor |
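-
-A minimal unit test subclass might look like the following sketch (the model name and parameters are placeholders):
-
-```python
-from typing import Type
-
-from langchain_parrot_link.chat_models import ChatParrotLink
-from langchain_tests.unit_tests import ChatModelUnitTests
-
-
-class TestChatParrotLinkUnit(ChatModelUnitTests):
-    @property
-    def chat_model_class(self) -> Type[ChatParrotLink]:
-        return ChatParrotLink
-
-    @property
-    def chat_model_params(self) -> dict:
-        # Constructor parameters used when the test suite instantiates the model.
-        return {"model": "bird-brain-001", "temperature": 0}
-```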
-
-Additionally, the chat model standard tests cover a range of behaviors, from the most basic requirements (generating a response to a query) to optional capabilities like multi-modal support and tool-calling. For a test run to be successful:
-
-1. If a feature is intended to be supported by the model, it should pass;
-2. If a feature is not intended to be supported by the model, it should be skipped.
-
-Tests for "optional" capabilities are controlled via a set of properties that can be overridden on the test model subclass.
-
-You can see the **entire list of configurable capabilities** in the API references for
-[unit tests](https://python.langchain.com/api_reference/standard_tests/unit_tests/langchain_tests.unit_tests.chat_models.ChatModelUnitTests.html)
-and [integration tests](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.html).
-
-For example, to enable integration tests for image inputs, we can implement
-
-```python
-@property
-def supports_image_inputs(self) -> bool:
-    return True
-```
-
-on the integration test class.
-
-:::note
-
-Details on what tests are run, how each test can be skipped, and troubleshooting tips for each test can be found in the API references. See details:
-
-- [Unit tests API reference](https://python.langchain.com/api_reference/standard_tests/unit_tests/langchain_tests.unit_tests.chat_models.ChatModelUnitTests.html)
-- [Integration tests API reference](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.html)
-
-:::
-
-Unit test example:
-
-import ChatUnitSource from '../../../../src/theme/integration_template/tests/unit_tests/test_chat_models.py';
-
-<CodeBlock language="python" title="tests/unit_tests/test_chat_models.py">
-{
-    ChatUnitSource.replaceAll('__ModuleName__', 'ParrotLink')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-Integration test example:
-
-
-import ChatIntegrationSource from '../../../../src/theme/integration_template/tests/integration_tests/test_chat_models.py';
-
-<CodeBlock language="python" title="tests/integration_tests/test_chat_models.py">
-{
-    ChatIntegrationSource.replaceAll('__ModuleName__', 'ParrotLink')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-    </TabItem>
-    <TabItem value="vector_stores" label="Vector stores">
-
-
-Vector store tests do not have optional capabilities to be configured at this time.
-
-Here's how you would configure the standard tests for a typical vector store (using
-`ParrotVectorStore` as a placeholder):
-
-import VectorStoreIntegrationSource from '../../../../src/theme/integration_template/tests/integration_tests/test_vectorstores.py';
-
-<CodeBlock language="python" title="tests/integration_tests/test_vectorstores.py">
-{
-    VectorStoreIntegrationSource.replaceAll('__ModuleName__', 'Parrot')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-Configuring the tests consists of implementing pytest fixtures for setting up an
-empty vector store and tearing down the vector store after the test run ends.
-
-| Fixture | Description |
-| --- | --- |
-| `vectorstore` | A generator that yields an empty vector store for unit tests. The vector store is cleaned up after the test run ends. |
-
-For example, below is the `VectorStoreIntegrationTests` class for the [Chroma](https://python.langchain.com/docs/integrations/vectorstores/chroma/)
-integration:
-
-```python
-from typing import Generator
-
-import pytest
-from langchain_core.vectorstores import VectorStore
-from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
-
-from langchain_chroma import Chroma
-
-
-class TestChromaStandard(VectorStoreIntegrationTests):
-    @pytest.fixture()
-    def vectorstore(self) -> Generator[VectorStore, None, None]:  # type: ignore
-        """Get an empty vectorstore for unit tests."""
-        store = Chroma(embedding_function=self.get_embeddings())
-        try:
-            yield store
-        finally:
-            store.delete_collection()
-            pass
-
-```
-
-Note that before the initial `yield`, we instantiate the vector store with an
-[embeddings](/docs/concepts/embedding_models/) object. This is a pre-defined
-["fake" embeddings model](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.vectorstores.VectorStoreIntegrationTests.html#langchain_tests.integration_tests.vectorstores.VectorStoreIntegrationTests.get_embeddings)
-that will generate short, arbitrary vectors for documents. You can use a different
-embeddings object if desired.
-
-In the `finally` block, we call whatever integration-specific logic is needed to
-bring the vector store to a clean state. This logic is executed in between each test
-(e.g., even if tests fail).
-
-:::note
-
-Details on what tests are run and troubleshooting tips for each test can be found in the [API reference](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.vectorstores.VectorStoreIntegrationTests.html).
-
-:::
-
-
-    </TabItem>
-    <TabItem value="embeddings" label="Embeddings">
-
-To configure standard tests for an embeddings model, we subclass `EmbeddingsUnitTests` and `EmbeddingsIntegrationTests`. On each subclass, we override the following `@property` methods to specify the embeddings model to be tested and the embeddings model's configuration:
-
-| Property | Description |
-| --- | --- |
-| `embeddings_class` | The class for the embeddings model to be tested |
-| `embedding_model_params` | The parameters to pass to the embeddings model's constructor |
-
-:::note
-
-Details on what tests are run, how each test can be skipped, and troubleshooting tips for each test can be found in the API references. See details:
-
-- [Unit tests API reference](https://python.langchain.com/api_reference/standard_tests/unit_tests/langchain_tests.unit_tests.embeddings.EmbeddingsUnitTests.html)
-- [Integration tests API reference](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.embeddings.EmbeddingsIntegrationTests.html)
-
-:::
-
-Unit test example:
-
-import EmbeddingsUnitSource from '../../../../src/theme/integration_template/tests/unit_tests/test_embeddings.py';
-
-<CodeBlock language="python" title="tests/unit_tests/test_embeddings.py">
-{
-    EmbeddingsUnitSource.replaceAll('__ModuleName__', 'ParrotLink')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-Integration test example:
-
-
-```python title="tests/integration_tests/test_embeddings.py"
-from typing import Type
-
-from langchain_parrot_link.embeddings import ParrotLinkEmbeddings
-from langchain_tests.integration_tests import EmbeddingsIntegrationTests
-
-
-class TestParrotLinkEmbeddingsIntegration(EmbeddingsIntegrationTests):
-    @property
-    def embeddings_class(self) -> Type[ParrotLinkEmbeddings]:
-        return ParrotLinkEmbeddings
-
-    @property
-    def embedding_model_params(self) -> dict:
-        return {"model": "nest-embed-001"}
-```
-
-import EmbeddingsIntegrationSource from '../../../../src/theme/integration_template/tests/integration_tests/test_embeddings.py';
-
-<CodeBlock language="python" title="tests/integration_tests/test_embeddings.py">
-{
-    EmbeddingsIntegrationSource.replaceAll('__ModuleName__', 'ParrotLink')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT_LINK')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-    </TabItem>
-    <TabItem value="tools" label="Tools">
-
-To configure standard tests for a tool, we subclass `ToolsUnitTests` and
-`ToolsIntegrationTests`. On each subclass, we override the following `@property` methods
-to specify the tool to be tested and the tool's configuration:
-
-| Property | Description |
-| --- | --- |
-| `tool_constructor` | The constructor for the tool to be tested, or an instantiated tool. |
-| `tool_constructor_params` | The parameters to pass to the tool (optional). |
-| `tool_invoke_params_example` | An example of the parameters to pass to the tool's `invoke` method. |
-
-If you are testing a tool class and pass a class like `MyTool` to `tool_constructor`, you can pass the parameters to the constructor in `tool_constructor_params`. 
-
-If you are testing an instantiated tool, you can pass the instantiated tool to `tool_constructor` and do not
-override `tool_constructor_params`.
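-
-A minimal unit test subclass might look like the following sketch (`ParrotMultiplyTool` and its arguments are placeholders):
-
-```python
-from typing import Type
-
-from langchain_parrot_link.tools import ParrotMultiplyTool
-from langchain_tests.unit_tests import ToolsUnitTests
-
-
-class TestParrotMultiplyToolUnit(ToolsUnitTests):
-    @property
-    def tool_constructor(self) -> Type[ParrotMultiplyTool]:
-        return ParrotMultiplyTool
-
-    @property
-    def tool_constructor_params(self) -> dict:
-        # Parameters passed to the tool's constructor (empty if none are needed).
-        return {}
-
-    @property
-    def tool_invoke_params_example(self) -> dict:
-        # Example arguments matching the tool's args_schema.
-        return {"a": 2, "b": 3}
-```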
-
-:::note
-
-Details on what tests are run, how each test can be skipped, and troubleshooting tips for each test can be found in the API references. See details:
-
-- [Unit tests API reference](https://python.langchain.com/api_reference/standard_tests/unit_tests/langchain_tests.unit_tests.tools.ToolsUnitTests.html)
-- [Integration tests API reference](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.tools.ToolsIntegrationTests.html)
-
-:::
-
-import ToolsUnitSource from '../../../../src/theme/integration_template/tests/unit_tests/test_tools.py';
-
-<CodeBlock language="python" title="tests/unit_tests/test_tools.py">
-{
-    ToolsUnitSource.replaceAll('__ModuleName__', 'Parrot')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-import ToolsIntegrationSource from '../../../../src/theme/integration_template/tests/integration_tests/test_tools.py';
-
-<CodeBlock language="python" title="tests/integration_tests/test_tools.py">
-{
-    ToolsIntegrationSource.replaceAll('__ModuleName__', 'Parrot')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-    </TabItem>
-
-    <TabItem value="retrievers" label="Retrievers">
-
-To configure standard tests for a retriever, we subclass `RetrieversUnitTests` and
-`RetrieversIntegrationTests`. On each subclass, we override the following `@property` methods:
-
-| Property | Description |
-| --- | --- |
-| `retriever_constructor` | The class for the retriever to be tested |
-| `retriever_constructor_params` | The parameters to pass to the retriever's constructor |
-| `retriever_query_example` | An example of the query to pass to the retriever's `invoke` method |
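-
-A minimal subclass might look like the following sketch (the retriever class and its parameters are placeholders):
-
-```python
-from typing import Type
-
-from langchain_parrot_link.retrievers import ParrotLinkRetriever
-from langchain_tests.integration_tests import RetrieversIntegrationTests
-
-
-class TestParrotLinkRetrieverIntegration(RetrieversIntegrationTests):
-    @property
-    def retriever_constructor(self) -> Type[ParrotLinkRetriever]:
-        return ParrotLinkRetriever
-
-    @property
-    def retriever_constructor_params(self) -> dict:
-        return {"k": 2}
-
-    @property
-    def retriever_query_example(self) -> str:
-        # A query that should return documents from the retriever.
-        return "example query"
-```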
-
-:::note
-
-Details on what tests are run and troubleshooting tips for each test can be found in the [API reference](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.retrievers.RetrieversIntegrationTests.html).
-
-:::
-
-import RetrieverIntegrationSource from '../../../../src/theme/integration_template/tests/integration_tests/test_retrievers.py';
-
-<CodeBlock language="python" title="tests/integration_tests/test_retrievers.py">
-{
-    RetrieverIntegrationSource.replaceAll('__ModuleName__', 'Parrot')
-        .replaceAll('__package_name__', 'langchain-parrot-link')
-        .replaceAll('__MODULE_NAME__', 'PARROT')
-        .replaceAll('__module_name__', 'langchain_parrot_link')
-}
-</CodeBlock>
-
-    </TabItem>
-</Tabs>
-
----
-
-### Running the tests
-
-You can run these with the following commands from your project root:
-
-<Tabs>
-    <TabItem value="poetry" label="Poetry" default>
-
-```bash
-# run unit tests without network access
-poetry run pytest --disable-socket --allow-unix-socket --asyncio-mode=auto tests/unit_tests
-
-# run integration tests
-poetry run pytest --asyncio-mode=auto tests/integration_tests
-```
-
-    </TabItem>
-    <TabItem value="pip" label="Pip">
-
-```bash
-# run unit tests without network access
-pytest --disable-socket --allow-unix-socket --asyncio-mode=auto tests/unit_tests
-
-# run integration tests
-pytest --asyncio-mode=auto tests/integration_tests
-```
-
-    </TabItem>
-</Tabs>
-
-## Test suite information and troubleshooting
-
-For a full list of the standard test suites that are available, as well as
-information on which tests are included and how to troubleshoot common issues,
-see the [Standard Tests API Reference](https://python.langchain.com/api_reference/standard_tests/index.html).
-
-You can see troubleshooting guides under the individual test suites listed in that API Reference. For example,
-[here is the guide for `ChatModelIntegrationTests.test_usage_metadata`](https://python.langchain.com/api_reference/standard_tests/integration_tests/langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.html#langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_usage_metadata).
diff --git a/langchain_md_files/contributing/how_to/testing.mdx b/langchain_md_files/contributing/how_to/testing.mdx
deleted file mode 100644
index 0afccc2a36087d0e3edaae0a241a30c7f30e3cc7..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/how_to/testing.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-sidebar_position: 6
----
-
-# Testing
-
-All of our packages have unit tests and integration tests, and we favor unit tests over integration tests.
-
-Unit tests run on every pull request, so they should be fast and reliable.
-
-Integration tests run once a day, and they require more setup, so they should be reserved for confirming interface points with external services.
-
-## Unit Tests
-
-Unit tests cover modular logic that does not require calls to outside APIs.
-If you add new logic, please add a unit test.
-
-To install dependencies for unit tests:
-
-```bash
-poetry install --with test
-```
-
-To run unit tests:
-
-```bash
-make test
-```
-
-To run unit tests in Docker:
-
-```bash
-make docker_tests
-```
-
-To run a specific test:
-
-```bash
-TEST_FILE=tests/unit_tests/test_imports.py make test
-```
-
-## Integration Tests
-
-Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
-If you add support for a new external API, please add a new integration test.
-
-**Warning:** Almost no tests should be integration tests.
-
-  Tests that require making network connections make it difficult for other
-  developers to test the code.
-
-  Instead, favor relying on the `responses` library and/or `mock.patch` to mock
-  requests using small fixtures.
-
-To install dependencies for integration tests:
-
-```bash
-poetry install --with test,test_integration
-```
-
-To run integration tests:
-
-```bash
-make integration_tests
-```
-
-### Prepare
-
-The integration tests use several search engines and databases. The tests
-aim to verify the correct behavior of the engines and databases according to
-their specifications and requirements.
-
-To run some integration tests, such as tests located in
-`tests/integration_tests/vectorstores/`, you will need to install the following
-software:
-
-- Docker
-- Python 3.8.1 or later
-
-Any new dependencies should be added by running:
-
-```bash
-# add package and install it after adding:
-poetry add tiktoken@latest --group "test_integration" && poetry install --with test_integration
-```
-
-Before running any tests, you should start a specific Docker container that has all the
-necessary dependencies installed. For instance, we use the `elasticsearch.yml` container
-for `test_elasticsearch.py`:
-
-```bash
-cd tests/integration_tests/vectorstores/docker-compose
-docker-compose -f elasticsearch.yml up
-```
-
-For environments that require more involved preparation, look for `*.sh` scripts. For instance,
-`opensearch.sh` builds the required Docker image and then launches OpenSearch.
-
-
-### Prepare environment variables for local testing:
-
-- Copy `tests/integration_tests/.env.example` to `tests/integration_tests/.env`.
-- Set variables in the `tests/integration_tests/.env` file, e.g. `OPENAI_API_KEY`.
-
-Additionally, it's important to note that some integration tests may require certain
-environment variables to be set, such as `OPENAI_API_KEY`. Be sure to set any required
-environment variables before running the tests to ensure they run correctly.
-
-### Recording HTTP interactions with pytest-vcr
-
-Some of the integration tests in this repository involve making HTTP requests to
-external services. To prevent these requests from being made every time the tests are
-run, we use pytest-vcr to record and replay HTTP interactions.
-
-When running tests in a CI/CD pipeline, you may not want to modify the existing
-cassettes. You can use the `--vcr-record=none` command-line option to disable recording
-new cassettes. Here's an example:
-
-```bash
-pytest --log-cli-level=10 tests/integration_tests/vectorstores/test_pinecone.py --vcr-record=none
-pytest tests/integration_tests/vectorstores/test_elasticsearch.py --vcr-record=none
-
-```
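-
-As a rough illustration of how the recording works, a test decorated with pytest-vcr's `vcr` marker records its HTTP traffic to a cassette the first time it runs and replays the cassette on subsequent runs. The example below is a generic sketch, not a test from this repository:
-
-```python
-import pytest
-import requests
-
-
-@pytest.mark.vcr()
-def test_fetch_example_page() -> None:
-    # First run: the request is made and recorded to a cassette file.
-    # Later runs (or with --vcr-record=none): the response is replayed from the cassette.
-    response = requests.get("https://example.com")
-    assert response.status_code == 200
-```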
-
-### Run some tests with coverage:
-
-```bash
-pytest tests/integration_tests/vectorstores/test_elasticsearch.py --cov=langchain --cov-report=html
-start "" htmlcov/index.html || open htmlcov/index.html
-
-```
-
-## Coverage
-
-Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle.
-
-Coverage requires the dependencies for integration tests:
-
-```bash
-poetry install --with test_integration
-```
-
-To get a report of current coverage, run the following:
-
-```bash
-make coverage
-```
diff --git a/langchain_md_files/contributing/index.mdx b/langchain_md_files/contributing/index.mdx
deleted file mode 100644
index 90e677bcf5f7788d191ae9aff200ee1d89806d90..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/index.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_position: 0
----
-# Welcome Contributors
-
-Hi there! Thank you for your interest in contributing to LangChain.
-As an open-source project in a fast-developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.
-
-## Tutorials
-
-More coming soon! We are working on tutorials to help you make your first contribution to the project.
-
-- [**Make your first docs PR**](tutorials/docs.mdx)
-
-## How-to Guides
-
-- [**Documentation**](how_to/documentation/index.mdx): Help improve our docs, including this one!
-- [**Code**](how_to/code/index.mdx): Help us write code, fix bugs, or improve our infrastructure.
-- [**Integrations**](how_to/integrations/index.mdx): Help us integrate with your favorite vendors and tools.
-- [**Standard Tests**](how_to/integrations/standard_tests): Ensure your integration passes an expected set of tests.
-
-## Reference
-
-- [**Repository Structure**](reference/repo_structure.mdx): Understand the high level structure of the repository.
-- [**Review Process**](reference/review_process.mdx): Learn about the review process for pull requests.
-- [**Frequently Asked Questions (FAQ)**](reference/faq.mdx): Get answers to common questions about contributing.
-
-## Community
-
-### 💭 GitHub Discussions
-
-We have a [discussions](https://github.com/langchain-ai/langchain/discussions) page where users can ask usage questions, discuss design decisions, and propose new features.
-
-If you are able to help answer questions, please do so! This will allow the maintainers to spend more time focused on development and bug fixing.
-
-### 🚩 GitHub Issues
-
-Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.
-
-There is a [taxonomy of labels](https://github.com/langchain-ai/langchain/labels?sort=count-desc)
-to help with sorting and discovery of issues of interest. Please use these to help
-organize issues. Check out the [Help Wanted](https://github.com/langchain-ai/langchain/labels/help%20wanted)
-and [Good First Issue](https://github.com/langchain-ai/langchain/labels/good%20first%20issue)
-tags for recommendations.
-
-If you start working on an issue, please assign it to yourself.
-
-If you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature.
-If two issues are related, or blocking, please link them rather than combining them.
-
-We will try to keep these issues as up-to-date as possible, though
-with the rapid rate of development in this field some may get out of date.
-If you notice this happening, please let us know.
-
-### 📢 Community Slack
-
-We have a [community slack](https://www.langchain.com/join-community) where you can ask questions, get help, and discuss the project with other contributors and users.
-
-### 🙋 Getting Help
-
-Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please
-ask in [community slack](https://www.langchain.com/join-community) or open a [discussion on GitHub](https://github.com/langchain-ai/langchain/discussions).
-
-In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
-If you are finding these difficult (or even just annoying) to work with, feel free to ask in [community slack](https://www.langchain.com/join-community)!
diff --git a/langchain_md_files/contributing/reference/faq.mdx b/langchain_md_files/contributing/reference/faq.mdx
deleted file mode 100644
index 178796ca33f5bf0d47657cb8c16dad9d746ea5a6..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/reference/faq.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
----
-sidebar_position: 6
-sidebar_label: FAQ
----
-# Frequently Asked Questions
-
-## Pull Requests (PRs)
-
-### How do I allow maintainers to edit my PR?
-
-When you submit a pull request, there may be additional changes
-necessary before merging it. Oftentimes, it is more efficient for the
-maintainers to make these changes themselves before merging, rather than asking you
-to do so in code review.
-
-By default, most pull requests will have a 
-`✅ Maintainers are allowed to edit this pull request.`
-badge in the right-hand sidebar.
-
-If you do not see this badge, you may have this setting off for the fork you are
-pull-requesting from. See [this GitHub docs page](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork)
-for more information.
-
-Notably, GitHub doesn't allow this setting to be enabled for forks in **organizations** ([issue](https://github.com/orgs/community/discussions/5634)).
-If you are working in an organization, we recommend submitting your PR from a personal
-fork in order to enable this setting.
-
-### Why hasn't my PR been reviewed?
-
-Please reference our [Review Process](review_process.mdx).
-
-### Why was my PR closed?
-
-Please reference our [Review Process](review_process.mdx).
-
-### I think my PR was closed in a way that didn't follow the review process. What should I do?
-
-Tag `@ccurme` in the PR comments referencing the portion of the review
-process that you believe was not followed. We'll take a look!
diff --git a/langchain_md_files/contributing/reference/index.mdx b/langchain_md_files/contributing/reference/index.mdx
deleted file mode 100644
index 4311a01cda17c523e2366eceab829ef5167508ce..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/reference/index.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
-# Reference
-
-- [**Repository Structure**](repo_structure.mdx): Understand the high level structure of the repository.
-- [**Review Process**](review_process.mdx): Learn about the review process for pull requests.
-- [**Frequently Asked Questions (FAQ)**](faq.mdx): Get answers to common questions about contributing.
\ No newline at end of file
diff --git a/langchain_md_files/contributing/reference/repo_structure.mdx b/langchain_md_files/contributing/reference/repo_structure.mdx
deleted file mode 100644
index 8838fdfb9357b81dd45f4098aea7f23b59cd787b..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/reference/repo_structure.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_position: 0.5
----
-# Repository Structure
-
-If you plan on contributing to LangChain code or documentation, it can be useful
-to understand the high level structure of the repository.
-
-LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
-You can check out our [installation guide](/docs/how_to/installation/) for more on how they fit together.
-
-Here's the structure visualized as a tree:
-
-```text
-.
-├── cookbook # Tutorials and examples
-├── docs # Contains content for the documentation here: https://python.langchain.com/
-├── libs
-│   ├── langchain
-│   │   ├── langchain
-│   │   ├── tests/unit_tests # Unit tests (present in each package not shown for brevity)
-│   │   ├── tests/integration_tests # Integration tests (present in each package not shown for brevity)
-│   ├── community # Third-party integrations
-│   │   ├── langchain-community
-│   ├── core # Base interfaces for key abstractions
-│   │   ├── langchain-core
-│   ├── experimental # Experimental components and chains
-│   │   ├── langchain-experimental
-|   ├── cli # Command line interface
-│   │   ├── langchain-cli
-│   ├── text-splitters
-│   │   ├── langchain-text-splitters
-│   ├── standard-tests
-│   │   ├── langchain-standard-tests
-│   ├── partners
-│       ├── langchain-partner-1
-│       ├── langchain-partner-2
-│       ├── ...
-│
-├── templates # A collection of easily deployable reference architectures for a wide variety of tasks.
-```
-
-The root directory also contains the following files:
-
-* `pyproject.toml`: Dependencies for building and linting the docs and cookbook.
-* `Makefile`: Shortcuts for building and linting the docs and cookbook.
-
-There are other files at the root directory level, but their presence should be self-explanatory. Feel free to browse around!
-
-## Documentation
-
-The `/docs` directory contains the content for the documentation that is shown
-at https://python.langchain.com/ and the associated API Reference https://python.langchain.com/api_reference/langchain/index.html.
-
-See the [documentation](../how_to/documentation/index.mdx) guidelines to learn how to contribute to the documentation.
-
-## Code
-
-The `/libs` directory contains the code for the LangChain packages.
-
-To learn more about how to contribute code see the following guidelines:
-
-- [Code](../how_to/code/index.mdx): Learn how to develop in the LangChain codebase.
-- [Integrations](../how_to/integrations/index.mdx): Learn how to contribute to third-party integrations to `langchain-community` or to start a new partner package.
-- [Testing](../how_to/testing.mdx): Guidelines to learn how to write tests for the packages.
diff --git a/langchain_md_files/contributing/reference/review_process.mdx b/langchain_md_files/contributing/reference/review_process.mdx
deleted file mode 100644
index 312540bf32cdd7edc34324b6ecad1129ec5803e1..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/reference/review_process.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
-# Review Process
-
-## Overview
-
-This document outlines the process used by the LangChain maintainers for reviewing pull requests (PRs). The primary objective of this process is to enhance the LangChain developer experience.
-
-## Review Statuses
-
-We categorize PRs using three main statuses, which are marked as project item statuses in the right sidebar and can be viewed in detail [here](https://github.com/orgs/langchain-ai/projects/12/views/1).
-
-- **Triage**: 
-  - Initial status for all newly submitted PRs.
-  - Requires a maintainer to categorize it into one of the other statuses.
-
-- **Needs Support**:
-  - PRs that require community feedback or additional input before moving forward.
-  - Automatically promoted to the backlog if it receives 5 upvotes.
-  - An auto-comment is generated when this status is applied, explaining the flow and the upvote requirement.
-  - If the PR remains in this status for 25 days, it will be marked as “stale” via auto-comment.
-  - PRs will be auto-closed after 30 days if no further action is taken.
-
-- **In Review**:
-  - PRs that are actively under review by our team.
-  - These are regularly reviewed and monitored.
-
-**Note:** A PR may only have one status at a time.
-
-**Note:** You may notice 3 additional statuses of Done, Closed, and Internal that
-are external to this lifecycle. Done and Closed PRs have been merged or closed,
-respectively. Internal is for PRs submitted by core maintainers, and these PRs are owned
-by the submitter.
-
-## Review Guidelines
-
-1. **PRs that touch /libs/core**:
-   - PRs that directly impact core code and are likely to affect end users.
-   - **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
-   - These PRs are given top priority and are reviewed the fastest.
-   - PRs that don't have a **concise** description of their motivation (either in the PR summary or in a linked issue) are likely to be closed without an in-depth review. Please do not generate verbose PR descriptions with an LLM.
-   - PRs that don't have unit tests are likely to be closed.
-   - Feature requests should first be opened as a GitHub issue and discussed with the LangChain maintainers. Large PRs submitted without prior discussion are likely to be closed.
-
-2. **PRs that touch /libs/langchain**:
-   - High-impact PRs that are closely related to core PRs but slightly lower in priority.
-   - **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
-   - These are reviewed and closed aggressively, similar to core PRs.
-   - New feature requests should be discussed with the core maintainer team beforehand in an issue.
-
-3. **PRs that touch /libs/partners/**:
-   - PRs involving integration packages.
-   - **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
-   - The review may be conducted by our team or handed off to the partner's development team, depending on the PR's content.
-   - We maintain communication lines with most partner dev teams to facilitate this process.
-
-4. **Community PRs**:
-   - Most community PRs will get an initial status of "needs support".
-   - **Triage Guideline**: most PRs should go to `Needs Support`. Bugfixes on high-traffic integrations should go straight to `In Review`.
-   - **Triage Guideline**: all new features and integrations should go to `Needs Support` and will be closed if they do not get enough support (measured by upvotes or comments).
-   - PRs in the `Needs Support` status for 20 days are marked as “stale” and will be closed after 30 days if no action is taken.
-
-5. **Documentation PRs**:
-   - PRs that touch the documentation content in docs/docs.
-   - **Triage Guideline**:
-      - PRs that fix typos or small errors in a single file and pass CI should go straight to `In Review`.
-      - PRs that make changes that have been discussed and agreed upon in an issue should go straight to `In Review`.
-      - PRs that add new pages or change the structure of the documentation should go to `Needs Support`.
-   - We strive to standardize documentation formats to streamline the review process.
-   - CI jobs run against documentation to ensure adherence to standards, automating much of the review.
-
-6. **PRs must be in English**:
-   - PRs that are not in English will be closed without review.
-   - This is to ensure that all maintainers can review the PRs effectively.
-
-## How to see a PR's status
-
-See screenshot:
-
-![PR Status](/img/review_process_status.png)
-
-*To see the status of all open PRs, please visit the [LangChain Project Board](https://github.com/orgs/langchain-ai/projects/12/views/2).*
-
-## Review Prioritization
-
-Our goal is to provide the best possible development experience by focusing on making software that:
-
-- Works: Works as intended (is bug-free).
-- Is useful: Improves LLM app development with components that work off-the-shelf and runtimes that simplify app building.
-- Is easy: Is intuitive to use and well-documented.
-
-We believe this process reflects our priorities and are open to feedback if you feel it does not.
-
-## GitHub Discussion
-
-We welcome your feedback on this process. Please feel free to add a comment in 
-[this GitHub Discussion](https://github.com/langchain-ai/langchain/discussions/25920).
diff --git a/langchain_md_files/contributing/tutorials/docs.mdx b/langchain_md_files/contributing/tutorials/docs.mdx
deleted file mode 100644
index 9eaf81653a0a76f038ad50df4ad42353b71d127b..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/tutorials/docs.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
-# Make your first docs PR
-
-This tutorial will guide you through making a simple documentation edit, like correcting a typo.
-
-### **Prerequisites**
-- GitHub account.
-- Familiarity with GitHub pull requests (basic understanding).
-
----
-
-## Editing a Documentation Page on GitHub
-
-Sometimes you want to make a small change, like fixing a typo, and the easiest way to do this is to use GitHub's editor directly.
-
-### **Steps**
-
-1. **Navigate to the documentation page in the LangChain docs:**
-   - On the documentation page, find the green "Edit this page" link at the bottom of the page.
-   - Click the button to be directed to the GitHub editor.
-   - If the file you're editing is a Jupyter Notebook (.ipynb) instead of a Markdown (.md, .mdx)
-        file, we recommend following the steps in section 3.
-
-2. **Fork the repository:**
-   - If you haven't already, GitHub will prompt you to fork the repository to your account.
-   - Make sure to fork the repository into your **personal account and not an organization** ([why?](../reference/faq.mdx#how-do-i-allow-maintainers-to-edit-my-pr)).
-   - Click the "Fork this repository" button to create a copy of the repository under your account.
-   - After forking, you'll automatically be redirected to the correct editor.
-
-3. **Make your changes:**
-   - Correct the typo directly in the GitHub editor.
-
-4. **Commit your changes:**
-   - Click the "Commit changes..." button at the top-right corner of the page.
-   - Give your commit a title like "Fix typo in X section."
-   - Optionally, write an extended commit description.
-   - Click "Propose changes"
-
-5. **Submit a pull request (PR):**
-   - GitHub will redirect you to a page where you can create a pull request.
-   - First, review your proposed changes to ensure they are correct.
-   - Click **Create pull request**.
-   - Give your PR a title like `docs: Fix typo in X section`.
-   - Follow the checklist in the PR description template.
-
-## Getting a Review
-
-Once you've submitted the pull request, it will be reviewed by the maintainers. You may receive feedback or requests for changes. Keep an eye on the PR to address any comments.
-
-Docs PRs are typically reviewed within a few days, but it may take longer depending on the complexity of the change and the availability of maintainers.
-
-For more information on reviews, see the [Review Process](../reference/review_process.mdx).
diff --git a/langchain_md_files/contributing/tutorials/index.mdx b/langchain_md_files/contributing/tutorials/index.mdx
deleted file mode 100644
index fc557736189581cb16d056ce3eee3ef1c2e4cf0d..0000000000000000000000000000000000000000
--- a/langchain_md_files/contributing/tutorials/index.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
-# Tutorials
-
-More coming soon! We are working on tutorials to help you make your first contribution to the project.
-
-- [**Make your first docs PR**](docs.mdx)
diff --git a/langchain_md_files/how_to/document_loader_json.mdx b/langchain_md_files/how_to/document_loader_json.mdx
deleted file mode 100644
index 83af3fdf93ef0e0b0d1732eab6e4104bca77c2cf..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/document_loader_json.mdx
+++ /dev/null
@@ -1,374 +0,0 @@
-# How to load JSON
-
-[JSON (JavaScript Object Notation)](https://en.wikipedia.org/wiki/JSON) is an open standard file format and data interchange format that uses human-readable text to store and transmit data objects consisting of attribute–value pairs and arrays (or other serializable values).
-
-[JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value.
-
-LangChain implements a [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) 
-to convert JSON and JSONL data into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) 
-objects. It uses a specified [jq schema](https://en.wikipedia.org/wiki/Jq_(programming_language)) to parse the JSON files, allowing for the extraction of specific fields into the content 
-and metadata of the LangChain Document.
-
-It uses the `jq` Python package. Check out this [manual](https://stedolan.github.io/jq/manual/#Basicfilters) for detailed documentation of the `jq` syntax.
-
-Here we will demonstrate: 
-
-- How to load JSON and JSONL data into the content of a LangChain `Document`;
-- How to load JSON and JSONL data into metadata associated with a `Document`.
-
-
-```python
-#!pip install jq
-```
-
-
-```python
-from langchain_community.document_loaders import JSONLoader
-```
-
-
-```python
-import json
-from pathlib import Path
-from pprint import pprint
-
-
-file_path='./example_data/facebook_chat.json'
-data = json.loads(Path(file_path).read_text())
-```
-
-
-```python
-pprint(data)
-```
-```output
-    {'image': {'creation_timestamp': 1675549016, 'uri': 'image_of_the_chat.jpg'},
-     'is_still_participant': True,
-     'joinable_mode': {'link': '', 'mode': 1},
-     'magic_words': [],
-     'messages': [{'content': 'Bye!',
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675597571851},
-                  {'content': 'Oh no worries! Bye',
-                   'sender_name': 'User 1',
-                   'timestamp_ms': 1675597435669},
-                  {'content': 'No Im sorry it was my mistake, the blue one is not '
-                              'for sale',
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675596277579},
-                  {'content': 'I thought you were selling the blue one!',
-                   'sender_name': 'User 1',
-                   'timestamp_ms': 1675595140251},
-                  {'content': 'Im not interested in this bag. Im interested in the '
-                              'blue one!',
-                   'sender_name': 'User 1',
-                   'timestamp_ms': 1675595109305},
-                  {'content': 'Here is $129',
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675595068468},
-                  {'photos': [{'creation_timestamp': 1675595059,
-                               'uri': 'url_of_some_picture.jpg'}],
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675595060730},
-                  {'content': 'Online is at least $100',
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675595045152},
-                  {'content': 'How much do you want?',
-                   'sender_name': 'User 1',
-                   'timestamp_ms': 1675594799696},
-                  {'content': 'Goodmorning! $50 is too low.',
-                   'sender_name': 'User 2',
-                   'timestamp_ms': 1675577876645},
-                  {'content': 'Hi! Im interested in your bag. Im offering $50. Let '
-                              'me know if you are interested. Thanks!',
-                   'sender_name': 'User 1',
-                   'timestamp_ms': 1675549022673}],
-     'participants': [{'name': 'User 1'}, {'name': 'User 2'}],
-     'thread_path': 'inbox/User 1 and User 2 chat',
-     'title': 'User 1 and User 2 chat'}
-```
-
-
-
-## Using `JSONLoader`
-
-Suppose we are interested in extracting the values under the `content` field within the `messages` key of the JSON data. This can easily be done through the `JSONLoader` as shown below.
-
-
-### JSON file
-
-```python
-loader = JSONLoader(
-    file_path='./example_data/facebook_chat.json',
-    jq_schema='.messages[].content',
-    text_content=False)
-
-data = loader.load()
-```
-
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1}),
-     Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2}),
-     Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3}),
-     Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4}),
-     Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5}),
-     Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6}),
-     Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7}),
-     Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8}),
-     Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9}),
-     Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10}),
-     Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11})]
-```
-
-
-
-### JSON Lines file
-
-If you want to load documents from a JSON Lines file, you pass `json_lines=True`
-and specify `jq_schema` to extract `page_content` from a single JSON object.
-
-```python
-file_path = './example_data/facebook_chat_messages.jsonl'
-pprint(Path(file_path).read_text())
-```
-
-```output
-    ('{"sender_name": "User 2", "timestamp_ms": 1675597571851, "content": "Bye!"}\n'
-     '{"sender_name": "User 1", "timestamp_ms": 1675597435669, "content": "Oh no '
-     'worries! Bye"}\n'
-     '{"sender_name": "User 2", "timestamp_ms": 1675596277579, "content": "No Im '
-     'sorry it was my mistake, the blue one is not for sale"}\n')
-```
-
-
-
-```python
-loader = JSONLoader(
-    file_path='./example_data/facebook_chat_messages.jsonl',
-    jq_schema='.content',
-    text_content=False,
-    json_lines=True)
-
-data = loader.load()
-```
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}),
-     Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}),
-     Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})]
-```
-
-
-
-Another option is to set `jq_schema='.'` and provide `content_key`:
-
-```python
-loader = JSONLoader(
-    file_path='./example_data/facebook_chat_messages.jsonl',
-    jq_schema='.',
-    content_key='sender_name',
-    json_lines=True)
-
-data = loader.load()
-```
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}),
-     Document(page_content='User 1', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 2}),
-     Document(page_content='User 2', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat_messages.jsonl', 'seq_num': 3})]
-```
-
-
-### JSON file with jq schema `content_key`
-
-To load documents from a JSON file using a `content_key` that is itself a jq expression, set `is_content_key_jq_parsable=True`.
-Ensure that the `content_key` is compatible with, and can be parsed by, the jq schema.
-
-```python
-file_path = './sample.json'
-pprint(Path(file_path).read_text())
-```
-
-```json
-    {"data": [
-        {"attributes": {
-            "message": "message1",
-            "tags": [
-            "tag1"]},
-        "id": "1"},
-        {"attributes": {
-            "message": "message2",
-            "tags": [
-            "tag2"]},
-        "id": "2"}]}
-```
-
-
-
-```python
-loader = JSONLoader(
-    file_path=file_path,
-    jq_schema=".data[]",
-    content_key=".attributes.message",
-    is_content_key_jq_parsable=True,
-)
-
-data = loader.load()
-```
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='message1', metadata={'source': '/path/to/sample.json', 'seq_num': 1}),
-     Document(page_content='message2', metadata={'source': '/path/to/sample.json', 'seq_num': 2})]
-```
-
-
-## Extracting metadata
-
-Generally, we want to include metadata available in the JSON file into the documents that we create from the content.
-
-The following demonstrates how metadata can be extracted using the `JSONLoader`.
-
-There are some key changes to note. In the previous example, where we didn't collect metadata, we could specify directly in the schema where the value for `page_content` should be extracted from:
-
-```
-.messages[].content
-```
-
-In the current example, we have to tell the loader to iterate over the records in the `messages` field. The `jq_schema` then has to be:
-
-```
-.messages[]
-```
-
-This allows us to pass the records (dict) into the `metadata_func` that has to be implemented. The `metadata_func` is responsible for identifying which pieces of information in the record should be included in the metadata stored in the final `Document` object.
-
-Additionally, we now have to specify explicitly in the loader, via the `content_key` argument, the key in the record from which the value for `page_content` should be extracted.
-
-
-```python
-# Define the metadata extraction function.
-def metadata_func(record: dict, metadata: dict) -> dict:
-
-    metadata["sender_name"] = record.get("sender_name")
-    metadata["timestamp_ms"] = record.get("timestamp_ms")
-
-    return metadata
-
-
-loader = JSONLoader(
-    file_path='./example_data/facebook_chat.json',
-    jq_schema='.messages[]',
-    content_key="content",
-    metadata_func=metadata_func
-)
-
-data = loader.load()
-```
-
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='Bye!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}),
-     Document(page_content='Oh no worries! Bye', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}),
-     Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}),
-     Document(page_content='I thought you were selling the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}),
-     Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}),
-     Document(page_content='Here is $129', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}),
-     Document(page_content='', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}),
-     Document(page_content='Online is at least $100', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}),
-     Document(page_content='How much do you want?', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}),
-     Document(page_content='Goodmorning! $50 is too low.', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}),
-     Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': '/Users/avsolatorio/WBG/langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})]
-```
-
-
-Now, you will see that the documents contain the metadata associated with the content we extracted.
-
-## The `metadata_func`
-
-As shown above, the `metadata_func` accepts the default metadata generated by the `JSONLoader`. This allows full control to the user with respect to how the metadata is formatted.
-
-For example, the default metadata contains the `source` and the `seq_num` keys. However, it is possible that the JSON data contains these keys as well. The user can then use the `metadata_func` to rename the default keys and use the ones from the JSON data.
-
-The example below shows how we can modify `source` so that it contains only the file path relative to the `langchain` directory.
-
-
-```python
-# Define the metadata extraction function.
-def metadata_func(record: dict, metadata: dict) -> dict:
-
-    metadata["sender_name"] = record.get("sender_name")
-    metadata["timestamp_ms"] = record.get("timestamp_ms")
-
-    if "source" in metadata:
-        source = metadata["source"].split("/")
-        source = source[source.index("langchain"):]
-        metadata["source"] = "/".join(source)
-
-    return metadata
-
-
-loader = JSONLoader(
-    file_path='./example_data/facebook_chat.json',
-    jq_schema='.messages[]',
-    content_key="content",
-    metadata_func=metadata_func
-)
-
-data = loader.load()
-```
-
-
-```python
-pprint(data)
-```
-
-```output
-    [Document(page_content='Bye!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}),
-     Document(page_content='Oh no worries! Bye', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 2, 'sender_name': 'User 1', 'timestamp_ms': 1675597435669}),
-     Document(page_content='No Im sorry it was my mistake, the blue one is not for sale', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 3, 'sender_name': 'User 2', 'timestamp_ms': 1675596277579}),
-     Document(page_content='I thought you were selling the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 4, 'sender_name': 'User 1', 'timestamp_ms': 1675595140251}),
-     Document(page_content='Im not interested in this bag. Im interested in the blue one!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 5, 'sender_name': 'User 1', 'timestamp_ms': 1675595109305}),
-     Document(page_content='Here is $129', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 6, 'sender_name': 'User 2', 'timestamp_ms': 1675595068468}),
-     Document(page_content='', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 7, 'sender_name': 'User 2', 'timestamp_ms': 1675595060730}),
-     Document(page_content='Online is at least $100', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 8, 'sender_name': 'User 2', 'timestamp_ms': 1675595045152}),
-     Document(page_content='How much do you want?', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 9, 'sender_name': 'User 1', 'timestamp_ms': 1675594799696}),
-     Document(page_content='Goodmorning! $50 is too low.', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 10, 'sender_name': 'User 2', 'timestamp_ms': 1675577876645}),
-     Document(page_content='Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!', metadata={'source': 'langchain/docs/modules/indexes/document_loaders/examples/example_data/facebook_chat.json', 'seq_num': 11, 'sender_name': 'User 1', 'timestamp_ms': 1675549022673})]
-```
-
-
-## Common JSON structures with jq schema
-
-The list below provides a reference to the possible `jq_schema` the user can use to extract content from the JSON data depending on the structure.
-
-```
-JSON        -> [{"text": ...}, {"text": ...}, {"text": ...}]
-jq_schema   -> ".[].text"
-
-JSON        -> {"key": [{"text": ...}, {"text": ...}, {"text": ...}]}
-jq_schema   -> ".key[].text"
-
-JSON        -> ["...", "...", "..."]
-jq_schema   -> ".[]"
-```
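-
-As a quick sanity check of the second pattern above, a sketch along these lines (using a hypothetical `./example_data/nested.json` file) would pull each `text` value under `key` into its own `Document`:
-
-```python
-from langchain_community.document_loaders import JSONLoader
-
-# Hypothetical file shaped like {"key": [{"text": "..."}, {"text": "..."}]}
-loader = JSONLoader(
-    file_path="./example_data/nested.json",
-    jq_schema=".key[].text",
-    text_content=False,
-)
-
-data = loader.load()
-# One Document per "text" entry, e.g. data[0].page_content
-```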
diff --git a/langchain_md_files/how_to/document_loader_office_file.mdx b/langchain_md_files/how_to/document_loader_office_file.mdx
deleted file mode 100644
index 6d2ef5faad005f284440380f42ac23084813ece4..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/document_loader_office_file.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
-# How to load Microsoft Office files
-
-The [Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.
-
-This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a LangChain 
-[Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
-object that we can use downstream.
-
-
-## Loading DOCX, XLSX, PPTX with AzureAIDocumentIntelligenceLoader
-
-[Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known as `Azure Form Recognizer`) is a machine-learning
-based service that extracts text (including handwriting), tables, document structures (e.g., titles, section headings) and key-value pairs from
-digital or scanned PDFs, images, Office and HTML files. Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`.
-
-This [current implementation](https://aka.ms/di-langchain) of a loader using `Document Intelligence` can incorporate content page-wise and turn it into LangChain documents. The default output format is markdown, which can be easily chained with `MarkdownHeaderTextSplitter` for semantic document chunking. You can also use `mode="single"` or `mode="page"` to return plain text, either as a single document or split by page.
-
-### Prerequisite
-
-An Azure AI Document Intelligence resource in one of the 3 preview regions: **East US**, **West US2**, **West Europe** - follow [this document](https://learn.microsoft.com/azure/ai-services/document-intelligence/create-document-intelligence-resource?view=doc-intel-4.0.0) to create one if you don't have one. You will pass `<endpoint>` and `<key>` as parameters to the loader.
-
-```python
-%pip install --upgrade --quiet  langchain langchain-community azure-ai-documentintelligence
-
-from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader
-
-file_path = "<filepath>"
-endpoint = "<endpoint>"
-key = "<key>"
-loader = AzureAIDocumentIntelligenceLoader(
-    api_endpoint=endpoint, api_key=key, file_path=file_path, api_model="prebuilt-layout"
-)
-
-documents = loader.load()
-```
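-
-Because the default output is markdown, the loaded content can then be chunked by heading. The following is a minimal sketch building on the `documents` loaded above (the header labels are arbitrary):
-
-```python
-from langchain_text_splitters import MarkdownHeaderTextSplitter
-
-# Split the markdown produced by Document Intelligence on its headings.
-splitter = MarkdownHeaderTextSplitter(
-    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
-)
-chunks = splitter.split_text(documents[0].page_content)
-```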
diff --git a/langchain_md_files/how_to/embed_text.mdx b/langchain_md_files/how_to/embed_text.mdx
deleted file mode 100644
index 0c636fec3e7a084e369be35950ca276cb83073c0..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/embed_text.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
-# Text embedding models
-
-:::info
-Head to [Integrations](/docs/integrations/text_embedding/) for documentation on built-in integrations with text embedding model providers.
-:::
-
-The Embeddings class is an interface for text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc.) - this class is designed to provide a standard interface for all of them.
-
-Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
-
-The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former, `.embed_documents`, takes as input multiple texts, while the latter, `.embed_query`, takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
-`.embed_query` will return a list of floats, whereas `.embed_documents` returns a list of lists of floats.
-
-## Get started
-
-### Setup
-
-import EmbeddingTabs from "@theme/EmbeddingTabs";
-
-<EmbeddingTabs customVarName="embeddings_model" />
-
-### `embed_documents`
-#### Embed list of texts
-
-Use `.embed_documents` to embed a list of strings, recovering a list of embeddings:
-
-```python
-embeddings = embeddings_model.embed_documents(
-    [
-        "Hi there!",
-        "Oh, hello!",
-        "What's your name?",
-        "My friends call me World",
-        "Hello World!"
-    ]
-)
-len(embeddings), len(embeddings[0])
-```
-
-```output
-(5, 1536)
-```
-
-
-### `embed_query`
-#### Embed single query
-Use `.embed_query` to embed a single piece of text (e.g., for the purpose of comparing to other embedded pieces of texts).
-
-```python
-embedded_query = embeddings_model.embed_query("What was the name mentioned in the conversation?")
-embedded_query[:5]
-```
-
-```output
-[0.0053587136790156364,
- -0.0004999046213924885,
- 0.038883671164512634,
- -0.003001077566295862,
- -0.00900818221271038]
-```
-
diff --git a/langchain_md_files/how_to/index.mdx b/langchain_md_files/how_to/index.mdx
deleted file mode 100644
index be06d087bbb9cac8dff3dfdb2a41bdacd60b2a44..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/index.mdx
+++ /dev/null
@@ -1,373 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-# How-to guides
-
-Here you’ll find answers to “How do I….?” types of questions.
-These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task.
-For conceptual explanations see the [Conceptual guide](/docs/concepts/).
-For end-to-end walkthroughs see [Tutorials](/docs/tutorials).
-For comprehensive descriptions of every class and function see the [API Reference](https://python.langchain.com/api_reference/).
-
-## Installation
-
-- [How to: install LangChain packages](/docs/how_to/installation/)
-- [How to: use LangChain with different Pydantic versions](/docs/how_to/pydantic_compatibility)
-
-## Key features
-
-This highlights functionality that is core to using LangChain.
-
-- [How to: return structured data from a model](/docs/how_to/structured_output/)
-- [How to: use a model to call tools](/docs/how_to/tool_calling)
-- [How to: stream runnables](/docs/how_to/streaming)
-- [How to: debug your LLM apps](/docs/how_to/debugging/)
-
-## Components
-
-These are the core building blocks you can use when building applications.
-
-### Chat models
-
-[Chat Models](/docs/concepts/chat_models) are newer forms of language models that take messages in and output a message.
-See [supported integrations](/docs/integrations/chat/) for details on getting started with chat models from a specific provider.
-
-- [How to: do function/tool calling](/docs/how_to/tool_calling)
-- [How to: get models to return structured output](/docs/how_to/structured_output)
-- [How to: cache model responses](/docs/how_to/chat_model_caching)
-- [How to: get log probabilities](/docs/how_to/logprobs)
-- [How to: create a custom chat model class](/docs/how_to/custom_chat_model)
-- [How to: stream a response back](/docs/how_to/chat_streaming)
-- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
-- [How to: track response metadata across providers](/docs/how_to/response_metadata)
-- [How to: use chat model to call tools](/docs/how_to/tool_calling)
-- [How to: stream tool calls](/docs/how_to/tool_streaming)
-- [How to: handle rate limits](/docs/how_to/chat_model_rate_limiting)
-- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
-- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
-- [How to: force a specific tool call](/docs/how_to/tool_choice)
-- [How to: work with local models](/docs/how_to/local_llms)
-- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
-
-### Messages
-
-[Messages](/docs/concepts/messages) are the input and output of chat models. They have some `content` and a `role`, which describes the source of the message.
-
-- [How to: trim messages](/docs/how_to/trim_messages/)
-- [How to: filter messages](/docs/how_to/filter_messages/)
-- [How to: merge consecutive messages of the same type](/docs/how_to/merge_message_runs/)
-
-### Prompt templates
-
-[Prompt Templates](/docs/concepts/prompt_templates) are responsible for formatting user input into a format that can be passed to a language model.
-
-- [How to: use few shot examples](/docs/how_to/few_shot_examples)
-- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/)
-- [How to: partially format prompt templates](/docs/how_to/prompts_partial)
-- [How to: compose prompts together](/docs/how_to/prompts_composition)
-
-### Example selectors
-
-[Example Selectors](/docs/concepts/example_selectors) are responsible for selecting the correct few shot examples to pass to the prompt.
-
-- [How to: use example selectors](/docs/how_to/example_selectors)
-- [How to: select examples by length](/docs/how_to/example_selectors_length_based)
-- [How to: select examples by semantic similarity](/docs/how_to/example_selectors_similarity)
-- [How to: select examples by semantic ngram overlap](/docs/how_to/example_selectors_ngram)
-- [How to: select examples by maximal marginal relevance](/docs/how_to/example_selectors_mmr)
-- [How to: select examples from LangSmith few-shot datasets](/docs/how_to/example_selectors_langsmith/)
-
-### LLMs
-
-What LangChain calls [LLMs](/docs/concepts/text_llms) are older forms of language models that take a string in and output a string.
-
-- [How to: cache model responses](/docs/how_to/llm_caching)
-- [How to: create a custom LLM class](/docs/how_to/custom_llm)
-- [How to: stream a response back](/docs/how_to/streaming_llm)
-- [How to: track token usage](/docs/how_to/llm_token_usage_tracking)
-- [How to: work with local models](/docs/how_to/local_llms)
-
-### Output parsers
-
-[Output Parsers](/docs/concepts/output_parsers) are responsible for taking the output of an LLM and parsing it into a more structured format.
-
-- [How to: parse text from message objects](/docs/how_to/output_parser_string)
-- [How to: use output parsers to parse an LLM response into structured format](/docs/how_to/output_parser_structured)
-- [How to: parse JSON output](/docs/how_to/output_parser_json)
-- [How to: parse XML output](/docs/how_to/output_parser_xml)
-- [How to: parse YAML output](/docs/how_to/output_parser_yaml)
-- [How to: retry when output parsing errors occur](/docs/how_to/output_parser_retry)
-- [How to: try to fix errors in output parsing](/docs/how_to/output_parser_fixing)
-- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
-
-### Document loaders
-
-[Document Loaders](/docs/concepts/document_loaders) are responsible for loading documents from a variety of sources.
-
-- [How to: load PDF files](/docs/how_to/document_loader_pdf)
-- [How to: load web pages](/docs/how_to/document_loader_web)
-- [How to: load CSV data](/docs/how_to/document_loader_csv)
-- [How to: load data from a directory](/docs/how_to/document_loader_directory)
-- [How to: load HTML data](/docs/how_to/document_loader_html)
-- [How to: load JSON data](/docs/how_to/document_loader_json)
-- [How to: load Markdown data](/docs/how_to/document_loader_markdown)
-- [How to: load Microsoft Office data](/docs/how_to/document_loader_office_file)
-- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
-
-### Text splitters
-
-[Text Splitters](/docs/concepts/text_splitters) take a document and split it into chunks that can be used for retrieval.
-
-- [How to: recursively split text](/docs/how_to/recursive_text_splitter)
-- [How to: split HTML](/docs/how_to/split_html)
-- [How to: split by character](/docs/how_to/character_text_splitter)
-- [How to: split code](/docs/how_to/code_splitter)
-- [How to: split Markdown by headers](/docs/how_to/markdown_header_metadata_splitter)
-- [How to: recursively split JSON](/docs/how_to/recursive_json_splitter)
-- [How to: split text into semantic chunks](/docs/how_to/semantic-chunker)
-- [How to: split by tokens](/docs/how_to/split_by_token)
-
-### Embedding models
-
-[Embedding Models](/docs/concepts/embedding_models) take a piece of text and create a numerical representation of it.
-See [supported integrations](/docs/integrations/text_embedding/) for details on getting started with embedding models from a specific provider.
-
-- [How to: embed text data](/docs/how_to/embed_text)
-- [How to: cache embedding results](/docs/how_to/caching_embeddings)
-- [How to: create a custom embeddings class](/docs/how_to/custom_embeddings)
-
-### Vector stores
-
-[Vector stores](/docs/concepts/vectorstores) are databases that can efficiently store and retrieve embeddings.
-See [supported integrations](/docs/integrations/vectorstores/) for details on getting started with vector stores from a specific provider.
-
-- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)
-
-### Retrievers
-
-[Retrievers](/docs/concepts/retrievers) are responsible for taking a query and returning relevant documents.
-
-- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
-- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
-- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression)
-- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
-- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
-- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
-- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
-- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
-- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
-- [How to: generate metadata filters](/docs/how_to/self_query)
-- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore)
-- [How to: use hybrid vector and keyword retrieval](/docs/how_to/hybrid)
-
-### Indexing
-
-Indexing is the process of keeping your vectorstore in-sync with the underlying data source.
-
-- [How to: reindex data to keep your vectorstore in-sync with the underlying data source](/docs/how_to/indexing)
-
-### Tools
-
-LangChain [Tools](/docs/concepts/tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.
-
-- [How to: create tools](/docs/how_to/custom_tools)
-- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
-- [How to: use chat models to call tools](/docs/how_to/tool_calling)
-- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
-- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
-- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
-- [How to: handle tool errors](/docs/how_to/tools_error)
-- [How to: force models to call a tool](/docs/how_to/tool_choice)
-- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
-- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
-- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
-- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
-- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
-- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
-- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)
-
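-As a minimal sketch of the tool interface described above (the function itself is a toy example):
-
-```python
-from langchain_core.tools import tool
-
-@tool
-def multiply(a: int, b: int) -> int:
-    """Multiply two integers."""
-    return a * b
-
-# The decorator produces a tool with a name, description, and argument schema
-print(multiply.name, multiply.description)
-print(multiply.invoke({"a": 2, "b": 3}))
-```
-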
-### Multimodal
-
-- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)
-- [How to: use multimodal prompts](/docs/how_to/multimodal_prompts/)
-
-
-### Agents
-
-:::note
-
-For in-depth how-to guides for agents, please check out the [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.
-
-:::
-
-- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
-- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
-
-### Callbacks
-
-[Callbacks](/docs/concepts/callbacks) allow you to hook into the various stages of your LLM application's execution.
-
-- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
-- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
-- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
-- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
-- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
-- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
-
-### Custom
-
-All LangChain components can easily be extended to support your own versions.
-
-- [How to: create a custom chat model class](/docs/how_to/custom_chat_model)
-- [How to: create a custom LLM class](/docs/how_to/custom_llm)
-- [How to: create a custom embeddings class](/docs/how_to/custom_embeddings)
-- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
-- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
-- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
-- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
-- [How to: define a custom tool](/docs/how_to/custom_tools)
-- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
-
-### Serialization
-- [How to: save and load LangChain objects](/docs/how_to/serialization)
-
-## Use cases
-
-These guides cover use-case specific details.
-
-### Q&A with RAG
-
-Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data.
-For a high-level tutorial on RAG, check out [this guide](/docs/tutorials/rag/).
-
-- [How to: add chat history](/docs/how_to/qa_chat_history_how_to/)
-- [How to: stream](/docs/how_to/qa_streaming/)
-- [How to: return sources](/docs/how_to/qa_sources/)
-- [How to: return citations](/docs/how_to/qa_citations/)
-- [How to: do per-user retrieval](/docs/how_to/qa_per_user/)
-
-
-### Extraction
-
-Extraction is when you use LLMs to extract structured information from unstructured text.
-For a high level tutorial on extraction, check out [this guide](/docs/tutorials/extraction/).
-
-- [How to: use reference examples](/docs/how_to/extraction_examples/)
-- [How to: handle long text](/docs/how_to/extraction_long_text/)
-- [How to: do extraction without using function calling](/docs/how_to/extraction_parse)
-
-### Chatbots
-
-Chatbots involve using an LLM to have a conversation.
-For a high-level tutorial on building chatbots, check out [this guide](/docs/tutorials/chatbot/).
-
-- [How to: manage memory](/docs/how_to/chatbots_memory)
-- [How to: do retrieval](/docs/how_to/chatbots_retrieval)
-- [How to: use tools](/docs/how_to/chatbots_tools)
-- [How to: manage large chat history](/docs/how_to/trim_messages/)
-
-### Query analysis
-
-Query Analysis is the task of using an LLM to generate a query to send to a retriever.
-For a high-level tutorial on query analysis, check out [this guide](/docs/tutorials/rag/#query-analysis).
-
-- [How to: add examples to the prompt](/docs/how_to/query_few_shot)
-- [How to: handle cases where no queries are generated](/docs/how_to/query_no_queries)
-- [How to: handle multiple queries](/docs/how_to/query_multiple_queries)
-- [How to: handle multiple retrievers](/docs/how_to/query_multiple_retrievers)
-- [How to: construct filters](/docs/how_to/query_constructing_filters)
-- [How to: deal with high cardinality categorical variables](/docs/how_to/query_high_cardinality)
-
-### Q&A over SQL + CSV
-
-You can use LLMs to do question answering over tabular data.
-For a high-level tutorial, check out [this guide](/docs/tutorials/sql_qa/).
-
-- [How to: use prompting to improve results](/docs/how_to/sql_prompting)
-- [How to: do query validation](/docs/how_to/sql_query_checking)
-- [How to: deal with large databases](/docs/how_to/sql_large_db)
-- [How to: deal with CSV files](/docs/how_to/sql_csv)
-
-### Q&A over graph databases
-
-You can use an LLM to do question answering over graph databases.
-For a high-level tutorial, check out [this guide](/docs/tutorials/graph/).
-
-- [How to: add a semantic layer over the database](/docs/how_to/graph_semantic)
-- [How to: construct knowledge graphs](/docs/how_to/graph_constructing)
-
-### Summarization
-
-LLMs can summarize and otherwise distill desired information from text, including
-large volumes of text. For a high-level tutorial, check out [this guide](/docs/tutorials/summarization).
-
-- [How to: summarize text in a single LLM call](/docs/how_to/summarize_stuff)
-- [How to: summarize text through parallelization](/docs/how_to/summarize_map_reduce)
-- [How to: summarize text through iterative refinement](/docs/how_to/summarize_refine)
-
-## LangChain Expression Language (LCEL)
-
-:::note Should I use LCEL?
-
-LCEL is an orchestration solution. See our
-[concepts page](/docs/concepts/lcel/#should-i-use-lcel) for recommendations on when to
-use LCEL.
-
-:::
-
-[LangChain Expression Language](/docs/concepts/lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) protocol.
-
-[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.
-
-[**Migration guide**](/docs/versions/migrating_chains): For migrating legacy chain abstractions to LCEL.
-
-- [How to: chain runnables](/docs/how_to/sequence)
-- [How to: stream runnables](/docs/how_to/streaming)
-- [How to: invoke runnables in parallel](/docs/how_to/parallel/)
-- [How to: add default invocation args to runnables](/docs/how_to/binding/)
-- [How to: turn any function into a runnable](/docs/how_to/functions)
-- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough)
-- [How to: configure runnable behavior at runtime](/docs/how_to/configure)
-- [How to: add message history (memory) to a chain](/docs/how_to/message_history)
-- [How to: route between sub-chains](/docs/how_to/routing)
-- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
-- [How to: inspect runnables](/docs/how_to/inspect)
-- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
-- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)
-
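-As a minimal sketch of chaining runnables with LCEL (the functions are toy examples; real chains typically compose prompts, models, and output parsers):
-
-```python
-from langchain_core.runnables import RunnableLambda
-
-# Two simple runnables composed with the | operator
-add_one = RunnableLambda(lambda x: x + 1)
-double = RunnableLambda(lambda x: x * 2)
-
-chain = add_one | double
-print(chain.invoke(3))  # -> 8
-```
-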
-## [LangGraph](https://langchain-ai.github.io/langgraph)
-
-LangGraph is an extension of LangChain aimed at
-building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
-
-LangGraph documentation is currently hosted on a separate site.
-You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/).
-
-## [LangSmith](https://docs.smith.langchain.com/)
-
-LangSmith allows you to closely trace, monitor and evaluate your LLM application.
-It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.
-
-LangSmith documentation is hosted on a separate site.
-You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/), but we'll highlight a few sections that are particularly
-relevant to LangChain below:
-
-### Evaluation
-<span data-heading-keywords="evaluation,evaluate"></span>
-
-Evaluating performance is a vital part of building LLM-powered applications.
-LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.
-
-To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).
-
-### Tracing
-<span data-heading-keywords="trace,tracing"></span>
-
-Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
-
-- [How to: trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
-- [How to: add metadata and tags to traces](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain#add-metadata-and-tags-to-traces)
-
-You can see general tracing-related how-tos [in this section of the LangSmith docs](https://docs.smith.langchain.com/how_to_guides/tracing).
diff --git a/langchain_md_files/how_to/installation.mdx b/langchain_md_files/how_to/installation.mdx
deleted file mode 100644
index 871ce87bd71ce8328bc3f5112fbbc6e769c94c68..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/installation.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-sidebar_position: 2
----
-
-# How to install LangChain packages
-
-The LangChain ecosystem is split into different packages, which allow you to choose exactly which pieces of
-functionality to install.
-
-## Official release
-
-To install the main `langchain` package, run:
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import CodeBlock from "@theme/CodeBlock";
-
-<Tabs>
-  <TabItem value="pip" label="Pip" default>
-    <CodeBlock language="bash">pip install langchain</CodeBlock>
-  </TabItem>
-  <TabItem value="conda" label="Conda">
-    <CodeBlock language="bash">conda install langchain -c conda-forge</CodeBlock>
-  </TabItem>
-</Tabs>
-
-While this package acts as a sane starting point for using LangChain,
-much of the value of LangChain comes when integrating it with various model providers, datastores, etc.
-By default, the dependencies needed to do that are NOT installed. You will need to install the dependencies for specific integrations separately, which we show below.
-
-## Ecosystem packages
-
-With the exception of the `langsmith` SDK, all packages in the LangChain ecosystem depend on `langchain-core`, which contains base
-classes and abstractions that other packages use. The dependency graph below shows how the different packages are related.
-A directed arrow indicates that the source package depends on the target package:
-
-![](/img/ecosystem_packages.png)
-
-When installing a package, you do not need to explicitly install that package's dependencies (such as `langchain-core`).
-However, you may choose to if you are using a feature only available in a certain version of that dependency.
-If you do, you should make sure that the installed or pinned version is compatible with any other integration packages you use.
-
-### LangChain core
-The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:
-
-```bash
-pip install langchain-core
-```
-
-### Integration packages
-
-Certain integrations like OpenAI and Anthropic have their own packages.
-Any integrations that require their own package will be documented as such in the [Integration docs](/docs/integrations/providers/).
-You can see a list of all integration packages in the [API reference](https://python.langchain.com/api_reference/) under the "Partner libs" dropdown.
-To install one of these run:
-
-```bash
-pip install langchain-openai
-```
-
-Any integrations that haven't been split out into their own packages will live in the `langchain-community` package. Install with:
-
-```bash
-pip install langchain-community
-```
-
-### LangChain experimental
-The `langchain-experimental` package holds experimental LangChain code, intended for research and experimental uses.
-Install with:
-
-```bash
-pip install langchain-experimental
-```
-
-### LangGraph
-`langgraph` is a library for building stateful, multi-actor applications with LLMs. It integrates smoothly with LangChain, but can be used without it.
-Install with:
-
-```bash
-pip install langgraph
-```
-
-### LangServe
-LangServe helps developers deploy LangChain runnables and chains as a REST API.
-LangServe is automatically installed by the LangChain CLI.
-If not using the LangChain CLI, install with:
-
-```bash
-pip install "langserve[all]"
-```
-for both client and server dependencies. Or `pip install "langserve[client]"` for client code, and `pip install "langserve[server]"` for server code.
-
-### LangChain CLI
-The LangChain CLI is useful for working with LangChain templates and other LangServe projects.
-Install with:
-
-```bash
-pip install langchain-cli
-```
-
-### LangSmith SDK
-The LangSmith SDK is automatically installed by LangChain. However, it does not depend on
-`langchain-core`, and can be installed and used independently if desired.
-If you are not using LangChain, you can install it with:
-
-```bash
-pip install langsmith
-```
-
-### From source
-
-If you want to install a package from source, you can do so by cloning the [main LangChain repo](https://github.com/langchain-ai/langchain), entering the directory of the package you want to install (`PATH/TO/REPO/langchain/libs/{package}`), and running:
-
-```bash
-pip install -e .
-```
-
-LangGraph, LangSmith SDK, and certain integration packages live outside the main LangChain repo. You can see [all repos here](https://github.com/langchain-ai).
diff --git a/langchain_md_files/how_to/toolkits.mdx b/langchain_md_files/how_to/toolkits.mdx
deleted file mode 100644
index c1f74199da1afc2e102f714cb0b6dc914f3712a3..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/toolkits.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
----
-sidebar_position: 3
----
-# How to use toolkits
-
-
-Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
-
-All Toolkits expose a `get_tools` method which returns a list of tools.
-You can therefore do:
-
-```python
-# Initialize a toolkit
-toolkit = ExampleToolkit(...)
-
-# Get list of tools
-tools = toolkit.get_tools()
-
-# Create agent
-agent = create_agent_method(llm, tools, prompt)
-```
diff --git a/langchain_md_files/how_to/vectorstores.mdx b/langchain_md_files/how_to/vectorstores.mdx
deleted file mode 100644
index 01d861503272fd2c3b3c0c4d3129168ff5861d88..0000000000000000000000000000000000000000
--- a/langchain_md_files/how_to/vectorstores.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
-# How to create and query vector stores
-
-:::info
-Head to [Integrations](/docs/integrations/vectorstores/) for documentation on built-in integrations with 3rd-party vector stores.
-:::
-
-One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding
-vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are
-'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search
-for you.
-
-## Get started
-
-This guide showcases basic functionality related to vector stores. A key part of working with vector stores is creating the vector to put in them,
-which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model interfaces](/docs/how_to/embed_text) before diving into this.
-
-Before using the vectorstore at all, we need to load some data and initialize an embedding model.
-
-We want to use `OpenAIEmbeddings`, so we have to get the OpenAI API key.
-
-```python
-import os
-import getpass
-
-os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')
-```
-
-```python
-from langchain_community.document_loaders import TextLoader
-from langchain_openai import OpenAIEmbeddings
-from langchain_text_splitters import CharacterTextSplitter
-
-# Load the document, split it into chunks, embed each chunk and load it into the vector store.
-raw_documents = TextLoader('state_of_the_union.txt').load()
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-documents = text_splitter.split_documents(raw_documents)
-```
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-There are many great vector store options, here are a few that are free, open-source, and run entirely on your local machine. Review all integrations for many great hosted offerings.
-
-
-<Tabs>
-  <TabItem value="chroma" label="Chroma" default>
-
-This walkthrough uses the `chroma` vector database, which runs on your local machine as a library.
-
-```bash
-pip install langchain-chroma
-```
-
-```python
-from langchain_chroma import Chroma
-
-db = Chroma.from_documents(documents, OpenAIEmbeddings())
-```
-
-  </TabItem>
-  <TabItem value="faiss" label="FAISS">
-
-This walkthrough uses the `FAISS` vector database, which makes use of the Facebook AI Similarity Search (FAISS) library.
-
-```bash
-pip install faiss-cpu
-```
-
-```python
-from langchain_community.vectorstores import FAISS
-
-db = FAISS.from_documents(documents, OpenAIEmbeddings())
-```
-
-  </TabItem>
-  <TabItem value="lance" label="Lance">
-
-This walkthrough uses the `LanceDB` vector database, which is based on the Lance data format.
-
-```bash
-pip install lancedb
-```
-
-```python
-from langchain_community.vectorstores import LanceDB
-
-import lancedb
-
-# Embedding model used for the seed row below
-embeddings = OpenAIEmbeddings()
-
-db = lancedb.connect("/tmp/lancedb")
-table = db.create_table(
-    "my_table",
-    data=[
-        {
-            "vector": embeddings.embed_query("Hello World"),
-            "text": "Hello World",
-            "id": "1",
-        }
-    ],
-    mode="overwrite",
-)
-db = LanceDB.from_documents(documents, OpenAIEmbeddings())
-```
-
-  </TabItem>
-</Tabs>
-
-
-## Similarity search
-
-All vectorstores expose a `similarity_search` method.
-This takes an incoming query string, creates an embedding of it, and then finds the documents whose embeddings are most similar.
-
-```python
-query = "What did the president say about Ketanji Brown Jackson"
-docs = db.similarity_search(query)
-print(docs[0].page_content)
-```
-
-```output
-    Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.
-
-    Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.
-
-    One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.
-
-    And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
-```
-
-
-### Similarity search by vector
-
-It is also possible to search for documents similar to a given embedding vector using `similarity_search_by_vector`, which accepts an embedding vector as a parameter instead of a string.
-
-```python
-embedding_vector = OpenAIEmbeddings().embed_query(query)
-docs = db.similarity_search_by_vector(embedding_vector)
-print(docs[0].page_content)
-```
-
-```output
-    Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.
-
-    Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.
-
-    One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.
-
-    And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.
-```
-
-
-## Async Operations
-
-
-Vector stores are usually run as a separate service that requires I/O, so they can be called asynchronously. This gives performance benefits because you don't waste time waiting for responses from an external service, and it is also important if you work with an asynchronous framework, such as [FastAPI](https://fastapi.tiangolo.com/).
-
-LangChain supports async operations on vector stores. All the methods can be called using their async counterparts, prefixed with `a` (for `async`).
-
-```python
-docs = await db.asimilarity_search(query)
-docs
-```
-
-```output
-[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'state_of_the_union.txt'}),
- Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling.  \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.  \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': 'state_of_the_union.txt'}),
- Document(page_content='And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together.  \n\nFirst, beat the opioid epidemic.', metadata={'source': 'state_of_the_union.txt'}),
- Document(page_content='Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.  \n\nThat ends on my watch. \n\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \n\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n\nLet’s pass the Paycheck Fairness Act and paid leave.  \n\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.', metadata={'source': 'state_of_the_union.txt'})]
-```
diff --git a/langchain_md_files/integrations/chat/index.mdx b/langchain_md_files/integrations/chat/index.mdx
deleted file mode 100644
index fb43dcdc89bc3ddc72141cf9498a5d32258f7c33..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/chat/index.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
-keywords: [compatibility]
----
-
-# Chat models
-
-[Chat models](/docs/concepts/chat_models) are language models that use a sequence of [messages](/docs/concepts/messages) as inputs and return messages as outputs (as opposed to using plain text). These are generally newer models.
-
-:::info
-
-If you'd like to write your own chat model, see [this how-to](/docs/how_to/custom_chat_model/).
-If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing/how_to/integrations/).
-
-:::
-
-import ChatModelTabs from "@theme/ChatModelTabs";
-
-<ChatModelTabs overrideParams={{openai: {model: "gpt-4o-mini"}}} />
-
-```python
-model.invoke("Hello, world!")
-```
-
-## Featured Providers
-
-:::info
-While all these LangChain classes support the indicated advanced feature, you may have
-to open the provider-specific documentation to learn which hosted models or backends support
-the feature.
-:::
-
-import { CategoryTable, IndexTable } from "@theme/FeatureTables";
-
-<CategoryTable category="chat" />
-
-## All chat models
-
-<IndexTable />
\ No newline at end of file
diff --git a/langchain_md_files/integrations/document_loaders/index.mdx b/langchain_md_files/integrations/document_loaders/index.mdx
deleted file mode 100644
index 971d40cd10cdfc0174695345eef898eebd407ed0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/document_loaders/index.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-# Document loaders
-
-import { CategoryTable, IndexTable } from "@theme/FeatureTables";
-
-DocumentLoaders load data into the standard LangChain Document format.
-
-Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` method.
-An example use case is as follows:
-
-```python
-from langchain_community.document_loaders.csv_loader import CSVLoader
-
-loader = CSVLoader(
-    ...  # <-- Integration specific parameters here
-)
-data = loader.load()
-```
-
-## Webpages
-
-The below document loaders allow you to load webpages.
-
-See this guide for a starting point: [How to: load web pages](/docs/how_to/document_loader_web).
-
-<CategoryTable category="webpage_loaders" />
-
-## PDFs
-
-The below document loaders allow you to load PDF documents.
-
-See this guide for a starting point: [How to: load PDF files](/docs/how_to/document_loader_pdf).
-
-<CategoryTable category="pdf_loaders" />
-
-## Cloud Providers
-
-The below document loaders allow you to load documents from your favorite cloud providers.
-
-<CategoryTable category="cloud_provider_loaders"/>
-
-## Social Platforms
-
-The below document loaders allow you to load documents from different social media platforms.
-
-<CategoryTable category="social_loaders"/>
-
-## Messaging Services
-
-The below document loaders allow you to load data from different messaging platforms.
-
-<CategoryTable category="messaging_loaders"/>
-
-## Productivity tools
-
-The below document loaders allow you to load data from commonly used productivity tools.
-
-<CategoryTable category="productivity_loaders"/>
-
-## Common File Types
-
-The below document loaders allow you to load data from common data formats.
-
-<CategoryTable category="common_loaders" />
-
-
-## All document loaders
-
-<IndexTable />
diff --git a/langchain_md_files/integrations/graphs/tigergraph.mdx b/langchain_md_files/integrations/graphs/tigergraph.mdx
deleted file mode 100644
index a9901459a0d9b0dcb047b2358d4ca6918c8a724b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/graphs/tigergraph.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# TigerGraph
-
->[TigerGraph](https://www.tigergraph.com/tigergraph-db/) is a natively distributed and high-performance graph database.
-> The storage of data in a graph format of vertices and edges leads to rich relationships, 
-> ideal for grounding LLM responses.
- 
-A comprehensive example of the `TigerGraph` and `LangChain` integration is [presented here](https://github.com/tigergraph/graph-ml-notebooks/blob/main/applications/large_language_models/TigerGraph_LangChain_Demo.ipynb).
-
-## Installation and Setup
-
-Follow instructions [how to connect to the `TigerGraph` database](https://docs.tigergraph.com/pytigergraph/current/getting-started/connection).
-
-Install the Python SDK:
-
-```bash
-pip install pyTigerGraph
-```
-
-## Example
-
-To utilize the `TigerGraph InquiryAI` functionality, you can import `TigerGraph` from `langchain_community.graphs`.
-
-```python
-import pyTigerGraph as tg
-
-conn = tg.TigerGraphConnection(host="DATABASE_HOST_HERE", graphname="GRAPH_NAME_HERE", username="USERNAME_HERE", password="PASSWORD_HERE")
-
-### ==== CONFIGURE INQUIRYAI HOST ====
-conn.ai.configureInquiryAIHost("INQUIRYAI_HOST_HERE")
-
-from langchain_community.graphs import TigerGraph
-
-graph = TigerGraph(conn)
-result = graph.query("How many servers are there?")
-print(result)
-```
-
diff --git a/langchain_md_files/integrations/llms/index.mdx b/langchain_md_files/integrations/llms/index.mdx
deleted file mode 100644
index 1831274c455555e6e75505ffd8ef6b6b1aac797a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/llms/index.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
-keywords: [compatibility]
----
-
-# LLMs
-
-:::caution
-You are currently on a page documenting the use of [text completion models](/docs/concepts/text_llms). Many of the latest and most popular models are [chat completion models](/docs/concepts/chat_models).
-
-Unless you are specifically using more advanced prompting techniques, you are probably looking for [this page instead](/docs/integrations/chat/).
-:::
-
-[LLMs](/docs/concepts/text_llms) are language models that take a string as input and return a string as output.
-
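-A minimal sketch of the string-in, string-out interface (this assumes the `langchain-openai` package is installed and `OPENAI_API_KEY` is set; the model name is illustrative):
-
-```python
-from langchain_openai import OpenAI
-
-llm = OpenAI(model="gpt-3.5-turbo-instruct")
-print(llm.invoke("Say hello in one short sentence."))  # returns a plain string
-```
-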
-:::info
-
-If you'd like to write your own LLM, see [this how-to](/docs/how_to/custom_llm/).
-If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing/how_to/integrations/).
-
-:::
-
-import { CategoryTable, IndexTable } from "@theme/FeatureTables";
-
-<CategoryTable category="llms" />
-
-## All LLMs
-
-<IndexTable />
diff --git a/langchain_md_files/integrations/llms/layerup_security.mdx b/langchain_md_files/integrations/llms/layerup_security.mdx
deleted file mode 100644
index 6beee5320903dcfc9ef58373189a1417ef3017a5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/llms/layerup_security.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
-# Layerup Security
-
-The [Layerup Security](https://uselayerup.com) integration allows you to secure your calls to any LangChain LLM, LLM chain or LLM agent. The LLM object wraps around any existing LLM object, allowing for a secure layer between your users and your LLMs.
-
-While the Layerup Security object is designed as an LLM, it is not actually an LLM itself; it simply wraps around an LLM, exposing the same functionality as the underlying LLM.
-
-## Setup
-First, you'll need a Layerup Security account from the Layerup [website](https://uselayerup.com).
-
-Next, create a project via the [dashboard](https://dashboard.uselayerup.com), and copy your API key. We recommend putting your API key in your project's environment.
-
-Install the Layerup Security SDK:
-```bash
-pip install LayerupSecurity
-```
-
-And install LangChain Community:
-```bash
-pip install langchain-community
-```
-
-And now you're ready to start protecting your LLM calls with Layerup Security!
-
-```python
-from datetime import datetime
-
-from langchain_community.llms.layerup_security import LayerupSecurity
-from langchain_openai import OpenAI
-
-# Create an instance of your favorite LLM
-openai = OpenAI(
-    model_name="gpt-3.5-turbo",
-    openai_api_key="OPENAI_API_KEY",
-)
-
-# Configure Layerup Security
-layerup_security = LayerupSecurity(
-    # Specify a LLM that Layerup Security will wrap around
-    llm=openai,
-
-    # Layerup API key, from the Layerup dashboard
-    layerup_api_key="LAYERUP_API_KEY",
-
-    # Custom base URL, if self hosting
-    layerup_api_base_url="https://api.uselayerup.com/v1",
-
-    # List of guardrails to run on prompts before the LLM is invoked
-    prompt_guardrails=[],
-
-    # List of guardrails to run on responses from the LLM
-    response_guardrails=["layerup.hallucination"],
-
-    # Whether or not to mask the prompt for PII & sensitive data before it is sent to the LLM
-    mask=False,
-
-    # Metadata for abuse tracking, customer tracking, and scope tracking.
-    metadata={"customer": "example@uselayerup.com"},
-
-    # Handler for guardrail violations on the prompt guardrails
-    handle_prompt_guardrail_violation=(
-        lambda violation: {
-            "role": "assistant",
-            "content": (
-                "There was sensitive data! I cannot respond. "
-                "Here's a dynamic canned response. Current date: {}"
-            ).format(datetime.now())
-        }
-        if violation["offending_guardrail"] == "layerup.sensitive_data"
-        else None
-    ),
-
-    # Handler for guardrail violations on the response guardrails
-    handle_response_guardrail_violation=(
-        lambda violation: {
-            "role": "assistant",
-            "content": (
-                "Custom canned response with dynamic data! "
-                "The violation rule was {}."
-            ).format(violation["offending_guardrail"])
-        }
-    ),
-)
-
-response = layerup_security.invoke(
-    "Summarize this message: my name is Bob Dylan. My SSN is 123-45-6789."
-)
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/acreom.mdx b/langchain_md_files/integrations/providers/acreom.mdx
deleted file mode 100644
index 78987870a2d2fbc3958980798f724ebaf52b2f5f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/acreom.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Acreom
-
-[acreom](https://acreom.com) is a dev-first knowledge base with tasks running on local `markdown` files.
-
-## Installation and Setup
-
-No installation is required. 
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/acreom).
-
-```python
-from langchain_community.document_loaders import AcreomLoader
-```
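-
-A minimal usage sketch (the vault path is a placeholder; `collect_metadata` is an optional flag of the loader):
-
-```python
-# Load markdown notes from a local acreom vault (the path is hypothetical)
-loader = AcreomLoader("path/to/your/acreom/vault", collect_metadata=False)
-docs = loader.load()
-```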
diff --git a/langchain_md_files/integrations/providers/activeloop_deeplake.mdx b/langchain_md_files/integrations/providers/activeloop_deeplake.mdx
deleted file mode 100644
index f0bcb60afd6d31f084bfd2a2f69ed06173736524..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/activeloop_deeplake.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# Activeloop Deep Lake
-
->[Activeloop Deep Lake](https://docs.activeloop.ai/) is a data lake for Deep Learning applications, allowing you to use it 
-> as a vector store.
-
-## Why Deep Lake?
-
-- More than just a (multi-modal) vector store. You can later use the dataset to fine-tune your own LLM models.
-- Not only stores embeddings, but also the original data with automatic version control.
-- Truly serverless. Doesn't require another service and can be used with major cloud providers (`AWS S3`, `GCS`, etc.)
-
-`Activeloop Deep Lake` supports `SelfQuery Retrieval`:
-[Activeloop Deep Lake Self Query Retrieval](/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query)
-
-
-## More Resources
-
-1. [Ultimate Guide to LangChain & Deep Lake: Build ChatGPT to Answer Questions on Your Financial Data](https://www.activeloop.ai/resources/ultimate-guide-to-lang-chain-deep-lake-build-chat-gpt-to-answer-questions-on-your-financial-data/)
-2. [Twitter the-algorithm codebase analysis with Deep Lake](https://github.com/langchain-ai/langchain/blob/master/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb)
-3. Here is [whitepaper](https://www.deeplake.ai/whitepaper) and [academic paper](https://arxiv.org/pdf/2209.10785.pdf) for Deep Lake
-4. Here is a set of additional resources available for review: [Deep Lake](https://github.com/activeloopai/deeplake), [Get started](https://docs.activeloop.ai/getting-started) and [Tutorials](https://docs.activeloop.ai/hub-tutorials)
-
-## Installation and Setup
-
-Install the Python package:
-
-```bash
-pip install deeplake
-```
-
-
-## VectorStore
-
-```python
-from langchain_community.vectorstores import DeepLake
-```
-
-See a [usage example](/docs/integrations/vectorstores/activeloop_deeplake).
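-
-A minimal sketch, assuming `langchain-openai` is installed, an OpenAI API key is set, and a local dataset path (the texts and path are illustrative):
-
-```python
-from langchain_community.vectorstores import DeepLake
-from langchain_openai import OpenAIEmbeddings
-
-# Create a local Deep Lake dataset and run a similarity search
-db = DeepLake.from_texts(
-    ["Deep Lake stores embeddings together with the original data"],
-    OpenAIEmbeddings(),
-    dataset_path="./my_deeplake",
-)
-docs = db.similarity_search("What does Deep Lake store?")
-```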
diff --git a/langchain_md_files/integrations/providers/ads4gpts.mdx b/langchain_md_files/integrations/providers/ads4gpts.mdx
deleted file mode 100644
index fd82f96f8ebb86a2c4f7e14de39587edd4bf4fa2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ads4gpts.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
-# ADS4GPTs
-
-> [ADS4GPTs](https://www.ads4gpts.com/) is building the open monetization backbone of the AI-native internet. It helps AI applications monetize through advertising with a UX- and privacy-first approach.
-
-## Installation and Setup
-
-### Using pip
-You can install the package directly from PyPI:
-
-```bash
-pip install ads4gpts-langchain
-```
-
-### From Source
-Alternatively, install from source:
-
-```bash
-git clone https://github.com/ADS4GPTs/ads4gpts.git
-cd ads4gpts/libs/python-sdk/ads4gpts-langchain
-pip install .
-```
-
-## Prerequisites
-
-- Python 3.11+
-- ADS4GPTs API Key ([Obtain API Key](https://www.ads4gpts.com))
-
-## Environment Variables
-Set the following environment variables for API authentication:
-
-```bash
-export ADS4GPTS_API_KEY='your-ads4gpts-api-key'
-```
-
-Alternatively, API keys can be passed directly when initializing classes or stored in a `.env` file.
-
-## Tools
-
-ADS4GPTs provides two main tools for monetization:
-
-### Ads4gptsInlineSponsoredResponseTool
-This tool fetches native, sponsored responses that can be seamlessly integrated within your AI application's outputs.
-
-```python
-from ads4gpts_langchain import Ads4gptsInlineSponsoredResponseTool
-```
-
-### Ads4gptsSuggestedPromptTool
-Generates sponsored prompt suggestions to enhance user engagement and provide monetization opportunities.
-
-```python
-from ads4gpts_langchain import Ads4gptsSuggestedPromptTool
-```
-### Ads4gptsInlineConversationalTool
-Delivers conversational sponsored content that naturally fits within chat interfaces and dialogs.
-
-```python
-from ads4gpts_langchain import Ads4gptsInlineConversationalTool
-```
-
-### Ads4gptsInlineBannerTool
-Provides inline banner advertisements that can be displayed within your AI application's response.
-
-```python
-from ads4gpts_langchain import Ads4gptsInlineBannerTool
-```
-
-### Ads4gptsSuggestedBannerTool
-Generates banner advertisement suggestions that can be presented to users as recommended content.
-
-```python
-from ads4gpts_langchain import Ads4gptsSuggestedBannerTool
-```
-
-## Toolkit
-
-The `Ads4gptsToolkit` combines these tools for convenient access in LangChain applications.
-
-```python
-from ads4gpts_langchain import Ads4gptsToolkit
-```
-
diff --git a/langchain_md_files/integrations/providers/aerospike.mdx b/langchain_md_files/integrations/providers/aerospike.mdx
deleted file mode 100644
index 9dfbaa68091de27bc11145b368d97679606b8edb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/aerospike.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# Aerospike
-
->[Aerospike](https://aerospike.com/docs/vector) is a high-performance, distributed database known for its speed and scalability, now with support for vector storage and search, enabling retrieval and search of embedding vectors for machine learning and AI applications.
-> See the documentation for Aerospike Vector Search (AVS) [here](https://aerospike.com/docs/vector).
-
-## Installation and Setup
-
-Install the AVS Python SDK and AVS langchain vector store:
-
-```bash
-pip install aerospike-vector-search langchain-community
-```
-
-See the documentation for the Python SDK [here](https://aerospike-vector-search-python-client.readthedocs.io/en/latest/index.html).
-The documentation for the AVS langchain vector store is [here](https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.aerospike.Aerospike.html).
-
-## Vector Store
-
-To import this vectorstore:
-
-```python
-from langchain_community.vectorstores import Aerospike
-```
-
-See a usage example [here](https://python.langchain.com/docs/integrations/vectorstores/aerospike/).
-
diff --git a/langchain_md_files/integrations/providers/ai21.mdx b/langchain_md_files/integrations/providers/ai21.mdx
deleted file mode 100644
index 140e755da8b83d976efb38aba2d71f8baf6d731f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ai21.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
-# AI21 Labs
-
->[AI21 Labs](https://www.ai21.com/about) is a company specializing in Natural 
-> Language Processing (NLP), which develops AI systems 
-> that can understand and generate natural language.
-
-This page covers how to use the `AI21` ecosystem within `LangChain`.
-
-## Installation and Setup
-
-- Get an AI21 api key and set it as an environment variable (`AI21_API_KEY`)
-- Install the Python package:
-
-```bash
-pip install langchain-ai21
-```
-
-## Chat models
-
-### AI21 Chat 
-
-See a [usage example](/docs/integrations/chat/ai21).
-
-```python
-from langchain_ai21 import ChatAI21
-```
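-
-A minimal sketch (assumes `AI21_API_KEY` is set; the model name is illustrative):
-
-```python
-chat = ChatAI21(model="jamba-instruct")
-chat.invoke("Tell me a fun fact about the Dead Sea.")
-```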
-
-## Deprecated features
-
-:::caution The following features are deprecated. 
-:::
-
-### AI21 LLM
-
-```python
-from langchain_ai21 import AI21LLM
-```
-
-### AI21 Contextual Answer
-
-```python
-from langchain_ai21 import AI21ContextualAnswers
-```
-
-### AI21 Embeddings
-
-```python
-from langchain_ai21 import AI21Embeddings
-```
-## Text splitters
-
-### AI21 Semantic Text Splitter
-
-```python
-from langchain_ai21 import AI21SemanticTextSplitter
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/ainetwork.mdx b/langchain_md_files/integrations/providers/ainetwork.mdx
deleted file mode 100644
index fdd8393e23cb51f28267b1f808d9be56d9653734..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ainetwork.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# AINetwork
-
->[AI Network](https://www.ainetwork.ai/build-on-ain) is a layer 1 blockchain designed to accommodate 
-> large-scale AI models, utilizing a decentralized GPU network powered by the 
-> [$AIN token](https://www.ainetwork.ai/token), enriching AI-driven `NFTs` (`AINFTs`).
-
-
-## Installation and Setup
-
-You need to install `ain-py` python package.
-
-```bash
-pip install ain-py
-```
-You need to set the `AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY` environment variable to your AIN Blockchain Account private key.
-
-## Toolkit
-
-See a [usage example](/docs/integrations/tools/ainetwork).
-
-```python
-from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
-```
-
diff --git a/langchain_md_files/integrations/providers/airbyte.mdx b/langchain_md_files/integrations/providers/airbyte.mdx
deleted file mode 100644
index f1198b14861a2d3f7ef930b26e93080a3c4df5c7..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/airbyte.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# Airbyte
-
->[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, 
-> databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.
-
-## Installation and Setup
-
-```bash
-pip install -U langchain-airbyte
-```
-
-:::note
-
-Currently, the `langchain-airbyte` library does not support Pydantic v2.
-Please downgrade to Pydantic v1 to use this package.
-
-This package also currently requires Python 3.10+.
-
-:::
-
-The integration package doesn't require any global environment variables that need to be
-set, but some integrations (e.g. `source-github`) may need credentials passed in.
-
-## Document loader
-
-### AirbyteLoader
-
-See a [usage example](/docs/integrations/document_loaders/airbyte).
-
-```python
-from langchain_airbyte import AirbyteLoader
-```
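-
-A minimal sketch using the bundled `source-faker` connector (the stream name and config are illustrative):
-
-```python
-loader = AirbyteLoader(
-    source="source-faker",
-    stream="users",
-    config={"count": 5},
-)
-docs = loader.load()
-```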
diff --git a/langchain_md_files/integrations/providers/alchemy.mdx b/langchain_md_files/integrations/providers/alchemy.mdx
deleted file mode 100644
index f1d7bbbcf75fa1ba67f432606688f101c0d3cf91..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/alchemy.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Alchemy
-
->[Alchemy](https://www.alchemy.com) is the platform to build blockchain applications.
-
-## Installation and Setup
-
-Check out the [installation guide](/docs/integrations/document_loaders/blockchain).
-
-## Document loader
-
-### BlockchainLoader on the Alchemy platform
-
-See a [usage example](/docs/integrations/document_loaders/blockchain).
-
-```python
-from langchain_community.document_loaders.blockchain import (
-    BlockchainDocumentLoader,
-    BlockchainType,
-)
-```
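-
-A minimal sketch (the contract address and API key are placeholders; `blockchainType` defaults to Ethereum mainnet):
-
-```python
-loader = BlockchainDocumentLoader(
-    contract_address="0xYourNftContractAddress",
-    blockchainType=BlockchainType.ETH_MAINNET,
-    api_key="your-alchemy-api-key",
-)
-docs = loader.load()
-```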
diff --git a/langchain_md_files/integrations/providers/aleph_alpha.mdx b/langchain_md_files/integrations/providers/aleph_alpha.mdx
deleted file mode 100644
index 4f8a5d0e086eb78ad022bdff58e4a064930a8a5e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/aleph_alpha.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Aleph Alpha
-
->[Aleph Alpha](https://docs.aleph-alpha.com/) was founded in 2019 with the mission to research and build the foundational technology for an era of strong AI. The team of international scientists, engineers, and innovators researches, develops, and deploys transformative AI like large language and multimodal models and runs the fastest European commercial AI cluster.
-
->[The Luminous series](https://docs.aleph-alpha.com/docs/introduction/luminous/) is a family of large language models.
-
-## Installation and Setup
-
-```bash
-pip install aleph-alpha-client
-```
-
-You have to create a new token. Please, see [instructions](https://docs.aleph-alpha.com/docs/account/#create-a-new-token).
-
-```python
-from getpass import getpass
-
-ALEPH_ALPHA_API_KEY = getpass()
-```
-
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/aleph_alpha).
-
-```python
-from langchain_community.llms import AlephAlpha
-```
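-
-A minimal sketch reusing the `ALEPH_ALPHA_API_KEY` gathered above (the model name and token limit are illustrative):
-
-```python
-llm = AlephAlpha(
-    model="luminous-extended",
-    maximum_tokens=32,
-    aleph_alpha_api_key=ALEPH_ALPHA_API_KEY,
-)
-llm.invoke("Q: What is AI? A:")
-```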
-
-## Text Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/aleph_alpha).
-
-```python
-from langchain_community.embeddings import AlephAlphaSymmetricSemanticEmbedding, AlephAlphaAsymmetricSemanticEmbedding
-```
diff --git a/langchain_md_files/integrations/providers/alibaba_cloud.mdx b/langchain_md_files/integrations/providers/alibaba_cloud.mdx
deleted file mode 100644
index baf0fe9fe076815679cba0fc57b87db3beecea3d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/alibaba_cloud.mdx
+++ /dev/null
@@ -1,99 +0,0 @@
-# Alibaba Cloud
-
->[Alibaba Group Holding Limited (Wikipedia)](https://en.wikipedia.org/wiki/Alibaba_Group), or `Alibaba`
-> (Chinese: 阿里巴巴), is a Chinese multinational technology company specializing in e-commerce, retail, 
-> Internet, and technology.
-> 
-> [Alibaba Cloud (Wikipedia)](https://en.wikipedia.org/wiki/Alibaba_Cloud), also known as `Aliyun`
-> (Chinese: 阿里云; pinyin: Ālǐyún; lit. 'Ali Cloud'), is a cloud computing company, a subsidiary 
-> of `Alibaba Group`. `Alibaba Cloud` provides cloud computing services to online businesses and 
-> Alibaba's own e-commerce ecosystem.
- 
- 
-## LLMs
-
-### Alibaba Cloud PAI EAS
-
-See [installation instructions and a usage example](/docs/integrations/llms/alibabacloud_pai_eas_endpoint).
-
-```python
-from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
-```
-
-### Tongyi Qwen
-
-See [installation instructions and a usage example](/docs/integrations/llms/tongyi).
-
-```python
-from langchain_community.llms import Tongyi
-```
-
-## Chat Models
-
-### Alibaba Cloud PAI EAS
-
-See [installation instructions and a usage example](/docs/integrations/chat/alibaba_cloud_pai_eas).
-
-```python
-from langchain_community.chat_models import PaiEasChatEndpoint
-```
-
-### Tongyi Qwen Chat
-
-See [installation instructions and a usage example](/docs/integrations/chat/tongyi).
-
-```python
-from langchain_community.chat_models.tongyi import ChatTongyi
-```
-
-## Document Loaders
-
-### Alibaba Cloud MaxCompute
-
-See [installation instructions and a usage example](/docs/integrations/document_loaders/alibaba_cloud_maxcompute).
-
-```python
-from langchain_community.document_loaders import MaxComputeLoader
-```
-
-## Vector stores
-
-### Alibaba Cloud OpenSearch
-
-See [installation instructions and a usage example](/docs/integrations/vectorstores/alibabacloud_opensearch).
-
-```python
-from langchain_community.vectorstores import AlibabaCloudOpenSearch, AlibabaCloudOpenSearchSettings
-```
-
-### Alibaba Cloud Tair
-
-See [installation instructions and a usage example](/docs/integrations/vectorstores/tair).
-
-```python
-from langchain_community.vectorstores import Tair
-```
-
-### AnalyticDB
-
-See [installation instructions and a usage example](/docs/integrations/vectorstores/analyticdb).
-
-```python
-from langchain_community.vectorstores import AnalyticDB
-```
-
-### Hologres
-
-See [installation instructions and a usage example](/docs/integrations/vectorstores/hologres).
-
-```python
-from langchain_community.vectorstores import Hologres
-```
-
-### Tablestore
-
-See [installation instructions and a usage example](/docs/integrations/vectorstores/tablestore).
-
-```python
-from langchain_community.vectorstores import TablestoreVectorStore
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/analyticdb.mdx b/langchain_md_files/integrations/providers/analyticdb.mdx
deleted file mode 100644
index 7a9e551075e8595854a04704f5444deb27fac0a4..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/analyticdb.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# AnalyticDB
-
->[AnalyticDB for PostgreSQL](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) 
-> is a massively parallel processing (MPP) data warehousing service 
-> from [Alibaba Cloud](https://www.alibabacloud.com/)
->that is designed to analyze large volumes of data online.
-
->`AnalyticDB for PostgreSQL` is developed based on the open-source `Greenplum Database` 
-> project and is enhanced with in-depth extensions by `Alibaba Cloud`. AnalyticDB 
-> for PostgreSQL is compatible with the ANSI SQL 2003 syntax and the PostgreSQL and 
-> Oracle database ecosystems. AnalyticDB for PostgreSQL also supports row store and 
-> column store. AnalyticDB for PostgreSQL processes petabytes of data offline at a 
-> high performance level and supports highly concurrent queries.
-
-This page covers how to use the AnalyticDB ecosystem within LangChain.
-
-## Installation and Setup
-
-You need to install the `sqlalchemy` python package.
-
-```bash
-pip install sqlalchemy
-```
-
-## VectorStore
-
-See a [usage example](/docs/integrations/vectorstores/analyticdb).
-
-```python
-from langchain_community.vectorstores import AnalyticDB
-```
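-
-A minimal sketch (the connection string is a placeholder and the embedding model is an assumption; any LangChain embeddings class works):
-
-```python
-from langchain_openai import OpenAIEmbeddings
-
-vector_store = AnalyticDB.from_texts(
-    ["AnalyticDB can store and search embedding vectors"],
-    OpenAIEmbeddings(),
-    connection_string="postgresql+psycopg2://user:password@host:5432/dbname",
-)
-results = vector_store.similarity_search("What can AnalyticDB store?")
-```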
diff --git a/langchain_md_files/integrations/providers/annoy.mdx b/langchain_md_files/integrations/providers/annoy.mdx
deleted file mode 100644
index 18a86fbfa398f7016a20b8765fde9140a2d0ad2a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/annoy.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Annoy
-
-> [Annoy](https://github.com/spotify/annoy) (`Approximate Nearest Neighbors Oh Yeah`) 
-> is a C++ library with Python bindings to search for points in space that are 
-> close to a given query point. It also creates large read-only file-based data 
-> structures that are mapped into memory so that many processes may share the same data. 
-
-## Installation and Setup
-
-```bash
-pip install annoy
-```
-
-
-## Vectorstore
-
-See a [usage example](/docs/integrations/vectorstores/annoy).
-
-```python
-from langchain_community.vectorstores import Annoy
-```
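-
-A minimal sketch using fake embeddings so it runs without an API key (the embedding size and texts are illustrative):
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-
-vector_store = Annoy.from_texts(
-    ["Annoy builds memory-mapped indexes for approximate nearest neighbor search"],
-    FakeEmbeddings(size=64),
-)
-docs = vector_store.similarity_search("approximate nearest neighbors", k=1)
-```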
diff --git a/langchain_md_files/integrations/providers/anthropic.mdx b/langchain_md_files/integrations/providers/anthropic.mdx
deleted file mode 100644
index dfa9340f6ec004ed1df4b78de5d125a8d90d1707..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/anthropic.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
-# Anthropic
-
->[Anthropic](https://www.anthropic.com/) is an AI safety and research company, and is the creator of `Claude`.
-This page covers all integrations between `Anthropic` models and `LangChain`.
-
-## Installation and Setup
-
-To use `Anthropic` models, you need to install a python package:
-
-```bash
-pip install -U langchain-anthropic
-```
-
-You need to set the `ANTHROPIC_API_KEY` environment variable.
-You can get an Anthropic API key [here](https://console.anthropic.com/settings/keys)
-
-## Chat Models
-
-### ChatAnthropic
-
-See a [usage example](/docs/integrations/chat/anthropic).
-
-```python
-from langchain_anthropic import ChatAnthropic
-
-model = ChatAnthropic(model='claude-3-opus-20240229')
-```
-
-
-## LLMs
-
-### [Legacy] AnthropicLLM
-
-**NOTE**: `AnthropicLLM` only supports legacy `Claude 2` models. 
-To use the newest `Claude 3` models, please use `ChatAnthropic` instead.
-
-See a [usage example](/docs/integrations/llms/anthropic).
-
-```python
-from langchain_anthropic import AnthropicLLM
-
-model = AnthropicLLM(model='claude-2.1')
-```
diff --git a/langchain_md_files/integrations/providers/anyscale.mdx b/langchain_md_files/integrations/providers/anyscale.mdx
deleted file mode 100644
index 8b35f0490e3ff3511b0795c559f9f783d0593e7a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/anyscale.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# Anyscale
-
->[Anyscale](https://www.anyscale.com) is a platform to run, fine-tune, and scale LLMs via production-ready APIs.
-> [Anyscale Endpoints](https://docs.anyscale.com/endpoints/overview) serve many open-source models in a cost-effective way.
-
-`Anyscale` also provides [an example](https://docs.anyscale.com/endpoints/model-serving/examples/langchain-integration)
-of how to set up `LangChain` with `Anyscale` for advanced chat agents.
-
-## Installation and Setup
-
-- Get an Anyscale Service URL, route, and API key, and set them as environment variables (`ANYSCALE_SERVICE_URL`, `ANYSCALE_SERVICE_ROUTE`, `ANYSCALE_SERVICE_TOKEN`).
-- Please see [the Anyscale docs](https://www.anyscale.com/get-started) for more details.
-
-We have to install the `openai` package:
-
-```bash
-pip install openai
-```
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/anyscale).
-
-```python
-from langchain_community.llms.anyscale import Anyscale
-```
-
-## Chat Models
-
-See a [usage example](/docs/integrations/chat/anyscale).
-
-```python
-from langchain_community.chat_models.anyscale import ChatAnyscale
-```
-
-## Embeddings
-
-See a [usage example](/docs/integrations/text_embedding/anyscale).
-
-```python
-from langchain_community.embeddings import AnyscaleEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/apache.mdx b/langchain_md_files/integrations/providers/apache.mdx
deleted file mode 100644
index 6acb7156111e968bf3c3be3ba3af93478757d274..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/apache.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
-# Apache Software Foundation
-
->[The Apache Software Foundation (Wikipedia)](https://en.wikipedia.org/wiki/The_Apache_Software_Foundation) 
-> is a decentralized open source community of developers. The software they 
-> produce is distributed under the terms of the Apache License, a permissive 
-> open-source license for free and open-source software (FOSS). The Apache projects 
-> are characterized by a collaborative, consensus-based development process 
-> and an open and pragmatic software license, which is to say that it 
-> allows developers, who receive the software freely, to redistribute 
-> it under non-free terms. Each project is managed by a self-selected 
-> team of technical experts who are active contributors to the project.
-
-## Apache AGE
-
->[Apache AGE](https://age.apache.org/) is a `PostgreSQL` extension that provides 
-> graph database functionality. `AGE` is an acronym for `A Graph Extension`, and 
-> is inspired by Bitnine’s fork of `PostgreSQL 10`, `AgensGraph`, which is 
-> a multi-model database. The goal of the project is to create a single 
-> storage that can handle both relational and graph model data so that users 
-> can use standard ANSI SQL along with `openCypher`, the Graph query language. 
-> The data elements `Apache AGE` stores are nodes, edges connecting them, and 
-> attributes of nodes and edges.
- 
-See more about [integrating with Apache AGE](/docs/integrations/graphs/apache_age).
-
-## Apache Cassandra
-
->[Apache Cassandra](https://cassandra.apache.org/) is a NoSQL, row-oriented, 
-> highly scalable and highly available database. Starting with version 5.0, 
-> the database ships with vector search capabilities.
- 
-See more about [integrating with Apache Cassandra](/docs/integrations/providers/cassandra/).
-
-## Apache Doris
-
->[Apache Doris](https://doris.apache.org/) is a modern data warehouse for 
-> real-time analytics. It delivers lightning-fast analytics on real-time data at scale.
->
->Usually `Apache Doris` is categorized as an OLAP database, and it has shown excellent 
-> performance in ClickBench — a Benchmark For Analytical DBMS. Since it has 
-> a super-fast vectorized execution engine, it could also be used as a fast vectordb.
- 
-See more about [integrating with Apache Doris](/docs/integrations/providers/apache_doris/).
-
-## Apache Kafka
-
->[Apache Kafka](https://github.com/apache/kafka) is a distributed messaging system 
-> that is used to publish and subscribe to streams of records.
- 
-See more about [integrating with Apache Kafka](/docs/integrations/memory/kafka_chat_message_history).
-
-
-## Apache Spark
-
->[Apache Spark](https://spark.apache.org/) is a unified analytics engine for 
-> large-scale data processing. It provides high-level APIs in Scala, Java, 
-> Python, and R, and an optimized engine that supports general computation 
-> graphs for data analysis. It also supports a rich set of higher-level 
-> tools including `Spark SQL` for SQL and DataFrames, `pandas API on Spark` 
-> for pandas workloads, `MLlib` for machine learning, 
-> `GraphX` for graph processing, and `Structured Streaming` for stream processing.
-
-See more about [integrating with Apache Spark](/docs/integrations/providers/spark).
diff --git a/langchain_md_files/integrations/providers/apache_doris.mdx b/langchain_md_files/integrations/providers/apache_doris.mdx
deleted file mode 100644
index 9beee729f33148e17afaf1960d16e371f20d4624..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/apache_doris.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Apache Doris
-
->[Apache Doris](https://doris.apache.org/) is a modern data warehouse for real-time analytics.
-> It delivers lightning-fast analytics on real-time data at scale.
-
->Usually `Apache Doris` is categorized as an OLAP database, and it has shown excellent performance 
-> in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). 
-> Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.
-
-## Installation and Setup
-
-```bash
-pip install pymysql
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/apache_doris).
-
-```python
-from langchain_community.vectorstores import ApacheDoris
-```
diff --git a/langchain_md_files/integrations/providers/apify.mdx b/langchain_md_files/integrations/providers/apify.mdx
deleted file mode 100644
index 3d30c6d4a987eb2854433de9f3dd0362430914e6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/apify.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
-# Apify
-
-
->[Apify](https://apify.com) is a cloud platform for web scraping and data extraction,
->which provides an [ecosystem](https://apify.com/store) of more than a thousand
->ready-made apps called *Actors* for various scraping, crawling, and extraction use cases.
-
-[![Apify Actors](/img/ApifyActors.png)](https://apify.com/store)
-
-This integration enables you to run Actors on the `Apify` platform and load their results into LangChain to feed your vector
-indexes with documents and data from the web, e.g. to generate answers from websites with documentation,
-blogs, or knowledge bases.
-
-
-## Installation and Setup
-
-- Install the LangChain Apify package for Python with:
-```bash
-pip install langchain-apify
-```
-- Get your [Apify API token](https://console.apify.com/account/integrations) and either set it as
-  an environment variable (`APIFY_API_TOKEN`) or pass it as `apify_api_token` in the constructor.
-
-## Tool
-
-You can use the `ApifyActorsTool` to call Apify Actors from agents.
-
-```python
-from langchain_apify import ApifyActorsTool
-```
-
-See [this notebook](/docs/integrations/tools/apify_actors) for example usage and a full example of a tool-calling agent with LangGraph in the [Apify LangGraph agent Actor template](https://apify.com/templates/python-langgraph).
-
-For more information on how to use this tool, visit [the Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph).
-
-## Wrapper
-
-You can use the `ApifyWrapper` to run Actors on the Apify platform.
-
-```python
-from langchain_apify import ApifyWrapper
-```
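-
-A hedged sketch of turning an Actor run into a document loader, assuming `APIFY_API_TOKEN` is set; the Actor ID, run input, and mapping function below are illustrative:
-
-```python
-from langchain_core.documents import Document
-from langchain_apify import ApifyWrapper
-
-apify = ApifyWrapper()
-
-# Illustrative Actor and input; any Actor that writes items to a dataset should work.
-loader = apify.call_actor(
-    actor_id="apify/website-content-crawler",
-    run_input={"startUrls": [{"url": "https://python.langchain.com/"}]},
-    dataset_mapping_function=lambda item: Document(
-        page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
-    ),
-)
-docs = loader.load()
-```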
-
-For more information on how to use this wrapper, see [the Apify integration documentation](https://docs.apify.com/platform/integrations/langchain).
-
-
-## Document loader
-
-You can also use our `ApifyDatasetLoader` to load data from an Apify dataset.
-
-```python
-from langchain_apify import ApifyDatasetLoader
-```
-
-For a more detailed walkthrough of this loader, see [this notebook](/docs/integrations/document_loaders/apify_dataset).
-
-
-Source code for this integration can be found in the [LangChain Apify repository](https://github.com/apify/langchain-apify).
diff --git a/langchain_md_files/integrations/providers/apple.mdx b/langchain_md_files/integrations/providers/apple.mdx
deleted file mode 100644
index 5a87afeb6c57eedc065630715b5abae19d72387c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/apple.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Apple
-
->[Apple Inc. (Wikipedia)](https://en.wikipedia.org/wiki/Apple_Inc.) is an American 
-> multinational corporation and technology company.
->
-> [iMessage (Wikipedia)](https://en.wikipedia.org/wiki/IMessage) is an instant 
-> messaging service developed by Apple Inc. and launched in 2011. 
-> `iMessage` functions exclusively on Apple platforms.
-
-## Installation and Setup
-
-See [setup instructions](/docs/integrations/chat_loaders/imessage).
-
-## Chat loader
-
-It loads chat sessions from the `iMessage` `chat.db` `SQLite` file.
-
-See a [usage example](/docs/integrations/chat_loaders/imessage).
-
-```python
-from langchain_community.chat_loaders.imessage import IMessageChatLoader
-```
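-
-A minimal sketch, assuming you have copied the `chat.db` file to the working directory (the path is a placeholder):
-
-```python
-from langchain_community.chat_loaders.imessage import IMessageChatLoader
-
-# Point the loader at a copy of the iMessage database (placeholder path).
-loader = IMessageChatLoader(path="./chat.db")
-chat_sessions = loader.load()
-```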
diff --git a/langchain_md_files/integrations/providers/arangodb.mdx b/langchain_md_files/integrations/providers/arangodb.mdx
deleted file mode 100644
index ff2d312fa9e76ef3087c8d8d74cbce9c057f0cd8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/arangodb.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# ArangoDB
-
->[ArangoDB](https://github.com/arangodb/arangodb) is a scalable graph database system to 
-> drive value from connected data, faster. Native graphs, an integrated search engine, and JSON support, via a single query language. ArangoDB runs on-prem, in the cloud – anywhere.
-
-## Installation and Setup
-
-Install the [ArangoDB Python Driver](https://github.com/ArangoDB-Community/python-arango) package with
-
-```bash
-pip install python-arango
-```
-
-## Graph QA Chain
-
-Connect your `ArangoDB` Database with a chat model to get insights on your data. 
-
-See the notebook example [here](/docs/integrations/graphs/arangodb).
-
-```python
-from arango import ArangoClient
-
-from langchain_community.graphs import ArangoGraph
-from langchain.chains import ArangoGraphQAChain
-```
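-
-A rough sketch of wiring these pieces together, assuming a locally running ArangoDB instance and an already-configured chat model bound to `llm`; the connection details are placeholders:
-
-```python
-from arango import ArangoClient
-
-from langchain_community.graphs import ArangoGraph
-from langchain.chains import ArangoGraphQAChain
-
-# Placeholder connection details for a local ArangoDB instance.
-db = ArangoClient(hosts="http://localhost:8529").db("_system", username="root", password="password")
-graph = ArangoGraph(db)
-
-# `llm` is assumed to be an existing chat model instance.
-chain = ArangoGraphQAChain.from_llm(llm, graph=graph, verbose=True)
-result = chain.invoke("Which documents in the graph are connected to each other?")
-```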
diff --git a/langchain_md_files/integrations/providers/arcee.mdx b/langchain_md_files/integrations/providers/arcee.mdx
deleted file mode 100644
index b685dd9b2d72fe0ddeab5962b1cf64b2548dbd59..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/arcee.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# Arcee
-
->[Arcee](https://www.arcee.ai/about/about-us) enables the development and advancement 
-> of what it calls SLMs: small, specialized, secure, and scalable language models. 
-> By offering an SLM Adaptation System and a seamless, secure integration, 
-> `Arcee` empowers enterprises to harness the full potential of 
-> domain-adapted language models, driving transformative 
-> innovation in operations.
-
-
-## Installation and Setup
-
-Get your `Arcee API` key.
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/arcee).
-
-```python
-from langchain_community.llms import Arcee
-```
-
-## Retrievers
-
-See a [usage example](/docs/integrations/retrievers/arcee).
-
-```python
-from langchain_community.retrievers import ArceeRetriever
-```
diff --git a/langchain_md_files/integrations/providers/arcgis.mdx b/langchain_md_files/integrations/providers/arcgis.mdx
deleted file mode 100644
index c7a00fd7ffcc8454c35d0a4458e1ebfa1402d4be..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/arcgis.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
-# ArcGIS
-
->[ArcGIS](https://www.esri.com/en-us/arcgis/about-arcgis/overview) is a family of client, 
-> server and online geographic information system software developed and maintained by [Esri](https://www.esri.com/).
-> 
->`ArcGISLoader` uses the `arcgis` package.
-> `arcgis` is a Python library for vector and raster analysis, geocoding, map making, 
-> routing and directions. It administers, organizes and manages users, 
-> groups and information items in your GIS.
->It enables access to ready-to-use maps and curated geographic data from `Esri` 
-> and other authoritative sources, and works with your own data as well. 
-
-## Installation and Setup
-
-We have to install the `arcgis` package.
-
-```bash
-pip install -U arcgis
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/arcgis).
-
-```python
-from langchain_community.document_loaders import ArcGISLoader
-```
diff --git a/langchain_md_files/integrations/providers/argilla.mdx b/langchain_md_files/integrations/providers/argilla.mdx
deleted file mode 100644
index fc4232e0ec9c488ab02fb73ee5811c2317223709..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/argilla.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Argilla
-
->[Argilla](https://argilla.io/) is an open-source data curation platform for LLMs. 
-> Using `Argilla`, everyone can build robust language models through faster data curation 
-> using both human and machine feedback. `Argilla` provides support for each step in the MLOps cycle, 
-> from data labeling to model monitoring.
-
-## Installation and Setup
-
-Get an API key for your `Argilla` instance.
-
-Install the Python package:
-
-```bash
-pip install argilla
-```
-
-## Callbacks
-
-
-```python
-from langchain.callbacks import ArgillaCallbackHandler
-```
-
-See an [example](/docs/integrations/callbacks/argilla).
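-
-A minimal sketch of attaching the handler to a model call, assuming a reachable Argilla instance and an existing dataset; the dataset name, URL, API key, and the OpenAI chat model used for the call are all placeholders:
-
-```python
-from langchain.callbacks import ArgillaCallbackHandler
-from langchain_openai import ChatOpenAI  # any chat model works; OpenAI is only an example
-
-argilla_callback = ArgillaCallbackHandler(
-    dataset_name="langchain-dataset",   # placeholder dataset name
-    api_url="http://localhost:6900",    # placeholder Argilla URL
-    api_key="argilla.apikey",           # placeholder API key
-)
-
-llm = ChatOpenAI(callbacks=[argilla_callback])
-llm.invoke("Tell me a short joke.")
-```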
diff --git a/langchain_md_files/integrations/providers/arize.mdx b/langchain_md_files/integrations/providers/arize.mdx
deleted file mode 100644
index 1f018195ac9138d20692f5b7de3e387d149ffd10..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/arize.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# Arize
-
-[Arize](https://arize.com) is an AI observability and LLM evaluation platform that offers
-support for LangChain applications, providing detailed traces of input, embeddings, retrieval,
-functions, and output messages.
-
-
-## Installation and Setup
-
-First, you need to install the `arize` Python package.
-
-```bash
-pip install arize
-```
-
-Second, you need to set up your [Arize account](https://app.arize.com/auth/join)
-and get your `API_KEY` or `SPACE_KEY`.
-
-
-## Callback handler
-
-```python
-from langchain_community.callbacks import ArizeCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/arxiv.mdx b/langchain_md_files/integrations/providers/arxiv.mdx
deleted file mode 100644
index 7fabf7396c1b509e7d20122b998a27f17e990c57..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/arxiv.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Arxiv
-
->[arXiv](https://arxiv.org/) is an open-access archive for 2 million scholarly articles in the fields of physics, 
-> mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and 
-> systems science, and economics.
-
-
-## Installation and Setup
-
-First, you need to install the `arxiv` Python package.
-
-```bash
-pip install arxiv
-```
-
-Second, you need to install the `PyMuPDF` Python package, which converts PDF files downloaded from the `arxiv.org` site into text.
-
-```bash
-pip install pymupdf
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/arxiv).
-
-```python
-from langchain_community.document_loaders import ArxivLoader
-```
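-
-A minimal sketch; the arXiv identifier below is arbitrary, and free-text queries also work:
-
-```python
-from langchain_community.document_loaders import ArxivLoader
-
-# Load a single paper by its arXiv identifier.
-docs = ArxivLoader(query="1605.08386", load_max_docs=1).load()
-print(docs[0].metadata["Title"])
-```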
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/arxiv).
-
-```python
-from langchain_community.retrievers import ArxivRetriever
-```
diff --git a/langchain_md_files/integrations/providers/ascend.mdx b/langchain_md_files/integrations/providers/ascend.mdx
deleted file mode 100644
index b8c1769a48965c036cc8d43ae4790eaf8241c9f8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ascend.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# Ascend
-
->[Ascend](https://www.hiascend.com/) is a series of Neural Processing Units (NPUs) provided by Huawei.
-
-This page covers how to use Ascend NPUs with LangChain.
-
-### Installation
-
-Install `torch-npu` using:
-
-```bash
-pip install torch-npu
-```
-
-Please follow the installation instructions as specified below:
-* Install CANN as shown [here](https://www.hiascend.com/document/detail/zh/canncommercial/700/quickstart/quickstart/quickstart_18_0002.html).
-
-### Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/ascend).
-
-```python
-from langchain_community.embeddings import AscendEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/asknews.mdx b/langchain_md_files/integrations/providers/asknews.mdx
deleted file mode 100644
index 1aa6dd81e4a6e2c767372f289b14358aced1cc63..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/asknews.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# AskNews
-
-[AskNews](https://asknews.app/) enhances language models with up-to-date global or historical news
-by processing and indexing over 300,000 articles daily, providing prompt-optimized responses
-through a low-latency endpoint, and ensuring transparency and diversity in its news coverage.
-
-## Installation and Setup
-
-First, you need to install the `asknews` Python package.
-
-```bash
-pip install asknews
-```
-
-You also need to set your AskNews API credentials, which can be generated at 
-the [AskNews console](https://my.asknews.app/).
-
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/asknews).
-
-```python
-from langchain_community.retrievers import AskNewsRetriever
-```
-
-## Tool
-
-See a [usage example](/docs/integrations/tools/asknews).
-
-```python
-from langchain_community.tools.asknews import AskNewsSearch
-```
diff --git a/langchain_md_files/integrations/providers/assemblyai.mdx b/langchain_md_files/integrations/providers/assemblyai.mdx
deleted file mode 100644
index dc666f2fc366f554d2364c2dc2f90a8a4f19e084..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/assemblyai.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# AssemblyAI
-
->[AssemblyAI](https://www.assemblyai.com/) builds `Speech AI` models for tasks like 
-> speech-to-text, speaker diarization, speech summarization, and more.
-> `AssemblyAI’s` Speech AI models include accurate speech-to-text for voice data 
-> (such as calls, virtual meetings, and podcasts), speaker detection, sentiment analysis, 
-> chapter detection, and PII redaction.
- 
-
-
-## Installation and Setup
-
-Get your [API key](https://www.assemblyai.com/dashboard/signup).
-
-Install the `assemblyai` package.
-
-```bash
-pip install -U assemblyai
-```
-
-## Document Loader
-
-### AssemblyAI Audio Transcript
-
-The `AssemblyAIAudioTranscriptLoader` transcribes audio files with the `AssemblyAI API` 
-and loads the transcribed text into documents.
-
-See a [usage example](/docs/integrations/document_loaders/assemblyai).
-
-```python
-from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader
-```
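-
-A minimal sketch, assuming the `ASSEMBLYAI_API_KEY` environment variable is set; the audio location is a placeholder, and a local file path also works:
-
-```python
-from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader
-
-# Placeholder audio URL; local file paths are also accepted.
-loader = AssemblyAIAudioTranscriptLoader(file_path="https://example.com/episode.mp3")
-docs = loader.load()
-```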
-
-### AssemblyAI Audio Loader By Id
-
-The `AssemblyAIAudioLoaderById` uses the AssemblyAI API to get an existing 
-transcription and loads the transcribed text into one or more Documents, 
-depending on the specified format.
-
-```python
-from langchain_community.document_loaders import AssemblyAIAudioLoaderById
-```
diff --git a/langchain_md_files/integrations/providers/astradb.mdx b/langchain_md_files/integrations/providers/astradb.mdx
deleted file mode 100644
index 853eafcc8ff5d7f2258f0cd340944bbe2f649ef5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/astradb.mdx
+++ /dev/null
@@ -1,150 +0,0 @@
-# Astra DB
-
-> [DataStax Astra DB](https://docs.datastax.com/en/astra/home/astra.html) is a serverless 
-> vector-capable database built on `Apache Cassandra®` and made conveniently available
-> through an easy-to-use JSON API.
-
-See a [tutorial provided by DataStax](https://docs.datastax.com/en/astra/astra-db-vector/tutorials/chatbot.html).
-
-## Installation and Setup
-
-Install the following Python package:
-```bash
-pip install "langchain-astradb>=0.1.0"
-```
-
-Get the [connection secrets](https://docs.datastax.com/en/astra/astra-db-vector/get-started/quickstart.html).
-Set up the following environment variables:
-
-```python
-ASTRA_DB_APPLICATION_TOKEN="TOKEN"
-ASTRA_DB_API_ENDPOINT="API_ENDPOINT"
-```
-
-## Vector Store
-
-```python
-from langchain_astradb import AstraDBVectorStore
-
-vector_store = AstraDBVectorStore(
-    embedding=my_embedding,
-    collection_name="my_store",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-```
-
-Learn more in the [example notebook](/docs/integrations/vectorstores/astradb).
-
-See the [example provided by DataStax](https://docs.datastax.com/en/astra/astra-db-vector/integrations/langchain.html).
-
-## Chat message history
-
-```python
-from langchain_astradb import AstraDBChatMessageHistory
-
-message_history = AstraDBChatMessageHistory(
-    session_id="test-session",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-```
-
-See the [usage example](/docs/integrations/memory/astradb_chat_message_history#example).
-
-## LLM Cache
-
-```python
-from langchain.globals import set_llm_cache
-from langchain_astradb import AstraDBCache
-
-set_llm_cache(AstraDBCache(
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-))
-```
-
-Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the Astra DB section).
-
-
-## Semantic LLM Cache
-
-```python
-from langchain.globals import set_llm_cache
-from langchain_astradb import AstraDBSemanticCache
-
-set_llm_cache(AstraDBSemanticCache(
-    embedding=my_embedding,
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-))
-```
-
-Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the appropriate section).
-
-## Document loader
-
-```python
-from langchain_astradb import AstraDBLoader
-
-loader = AstraDBLoader(
-    collection_name="my_collection",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-```
-
-Learn more in the [example notebook](/docs/integrations/document_loaders/astradb).
-
-## Self-querying retriever
-
-```python
-from langchain_astradb import AstraDBVectorStore
-from langchain.retrievers.self_query.base import SelfQueryRetriever
-
-vector_store = AstraDBVectorStore(
-    embedding=my_embedding,
-    collection_name="my_store",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-
-retriever = SelfQueryRetriever.from_llm(
-    my_llm,
-    vector_store,
-    document_content_description,
-    metadata_field_info
-)
-```
-
-Learn more in the [example notebook](/docs/integrations/retrievers/self_query/astradb).
-
-## Store
-
-```python
-from langchain_astradb import AstraDBStore
-
-store = AstraDBStore(
-    collection_name="my_kv_store",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-```
-
-See the API Reference for the [AstraDBStore](https://python.langchain.com/api_reference/astradb/storage/langchain_astradb.storage.AstraDBStore.html).
-
-## Byte Store
-
-```python
-from langchain_astradb import AstraDBByteStore
-
-store = AstraDBByteStore(
-    collection_name="my_kv_store",
-    api_endpoint=ASTRA_DB_API_ENDPOINT,
-    token=ASTRA_DB_APPLICATION_TOKEN,
-)
-```
-
-See the API reference for the [AstraDBByteStore](https://python.langchain.com/api_reference/astradb/storage/langchain_astradb.storage.AstraDBByteStore.html).
diff --git a/langchain_md_files/integrations/providers/atlas.mdx b/langchain_md_files/integrations/providers/atlas.mdx
deleted file mode 100644
index 06545aca112a9500ee23ee1b5ff5f22b17655d33..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/atlas.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Atlas
-
->[Nomic Atlas](https://docs.nomic.ai/index.html) is a platform for interacting with both 
-> small and internet scale unstructured datasets.
-
-
-## Installation and Setup
-
-- Install the Python package with `pip install nomic`
-- `Nomic` is also included in LangChain's poetry extras: `poetry install -E all`
-
-
-## VectorStore
-
-See a [usage example](/docs/integrations/vectorstores/atlas).
-
-```python
-from langchain_community.vectorstores import AtlasDB
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/aws.mdx b/langchain_md_files/integrations/providers/aws.mdx
deleted file mode 100644
index e30cbfd81f8b57aaead69c7a1d809c9ac8010779..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/aws.mdx
+++ /dev/null
@@ -1,392 +0,0 @@
-# AWS
-
-The `LangChain` integrations related to [Amazon AWS](https://aws.amazon.com/) platform.
-
-First-party AWS integrations are available in the `langchain_aws` package.
-
-```bash
-pip install langchain-aws
-```
-
-There are also some community integrations available in the `langchain_community` package with the optional `boto3` dependency.
-
-```bash
-pip install langchain-community boto3
-```
-
-## Chat models
-
-### Bedrock Chat
-
->[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of 
-> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`, 
-> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to 
-> build generative AI applications with security, privacy, and responsible AI. Using `Amazon Bedrock`, 
-> you can easily experiment with and evaluate top FMs for your use case, privately customize them with 
-> your data using techniques such as fine-tuning and `Retrieval Augmented Generation` (`RAG`), and build 
-> agents that execute tasks using your enterprise systems and data sources. Since `Amazon Bedrock` is 
-> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy 
-> generative AI capabilities into your applications using the AWS services you are already familiar with.
-
-See a [usage example](/docs/integrations/chat/bedrock).
-
-```python
-from langchain_aws import ChatBedrock
-```
-
-### Bedrock Converse
-AWS has recently released the Bedrock Converse API, which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability, the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then, a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.
-
-We recommend using `ChatBedrockConverse` for users who do not need to use custom models. See the [docs](/docs/integrations/chat/bedrock/#bedrock-converse-api) and [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) for more detail.
-
-```python
-from langchain_aws import ChatBedrockConverse
-```
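-
-A minimal sketch, assuming AWS credentials are configured and the account has access to the referenced Bedrock model; the model ID is illustrative:
-
-```python
-from langchain_aws import ChatBedrockConverse
-
-llm = ChatBedrockConverse(
-    model="anthropic.claude-3-sonnet-20240229-v1:0",  # illustrative model ID
-    temperature=0,
-)
-llm.invoke("Summarize the Bedrock Converse API in one sentence.")
-```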
-
-## LLMs
-
-### Bedrock
- 
-See a [usage example](/docs/integrations/llms/bedrock).
-
-```python
-from langchain_aws import BedrockLLM
-```
-
-### Amazon API Gateway
-
->[Amazon API Gateway](https://aws.amazon.com/api-gateway/) is a fully managed service that makes it easy for 
-> developers to create, publish, maintain, monitor, and secure APIs at any scale. APIs act as the "front door" 
-> for applications to access data, business logic, or functionality from your backend services. Using 
-> `API Gateway`, you can create RESTful APIs and WebSocket APIs that enable real-time two-way communication 
-> applications. `API Gateway` supports containerized and serverless workloads, as well as web applications.
-> 
-> `API Gateway` handles all the tasks involved in accepting and processing up to hundreds of thousands of 
-> concurrent API calls, including traffic management, CORS support, authorization and access control, 
-> throttling, monitoring, and API version management. `API Gateway` has no minimum fees or startup costs. 
-> You pay for the API calls you receive and the amount of data transferred out and, with the `API Gateway` 
-> tiered pricing model, you can reduce your cost as your API usage scales.
-
-See a [usage example](/docs/integrations/llms/amazon_api_gateway).
-
-```python
-from langchain_community.llms import AmazonAPIGateway
-```
-
-### SageMaker Endpoint
-
->[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a system that can build, train, and deploy 
-> machine learning (ML) models with fully managed infrastructure, tools, and workflows.
-
-We use `SageMaker` to host our model and expose it as the `SageMaker Endpoint`.
-
-See a [usage example](/docs/integrations/llms/sagemaker).
-
-```python
-from langchain_aws import SagemakerEndpoint
-```
-
-## Embedding Models
-
-### Bedrock
-
-See a [usage example](/docs/integrations/text_embedding/bedrock).
-```python
-from langchain_aws import BedrockEmbeddings
-```
-
-### SageMaker Endpoint
-
-See a [usage example](/docs/integrations/text_embedding/sagemaker-endpoint).
-```python
-from langchain_community.embeddings import SagemakerEndpointEmbeddings
-from langchain_community.llms.sagemaker_endpoint import ContentHandlerBase
-```
-
-## Document loaders
-
-### AWS S3 Directory and File
-
->[Amazon Simple Storage Service (Amazon S3)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-folders.html)
-> is an object storage service.
->[AWS S3 Directory](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-folders.html)
->[AWS S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingBucket.html)
-
-See a [usage example for S3DirectoryLoader](/docs/integrations/document_loaders/aws_s3_directory).
-
-See a [usage example for S3FileLoader](/docs/integrations/document_loaders/aws_s3_file).
-
-```python
-from langchain_community.document_loaders import S3DirectoryLoader, S3FileLoader
-```
-
-### Amazon Textract
-
->[Amazon Textract](https://docs.aws.amazon.com/managedservices/latest/userguide/textract.html) is a machine 
-> learning (ML) service that automatically extracts text, handwriting, and data from scanned documents.
-
-See a [usage example](/docs/integrations/document_loaders/amazon_textract).
-
-```python
-from langchain_community.document_loaders import AmazonTextractPDFLoader
-```
-
-### Amazon Athena
-
->[Amazon Athena](https://aws.amazon.com/athena/) is a serverless, interactive analytics service built
->on open-source frameworks, supporting open-table and file formats.
-
-See a [usage example](/docs/integrations/document_loaders/athena).
-
-```python
-from langchain_community.document_loaders.athena import AthenaLoader
-```
-
-### AWS Glue
-
->The [AWS Glue Data Catalog](https://docs.aws.amazon.com/en_en/glue/latest/dg/catalog-and-crawler.html) is a centralized metadata 
-> repository that allows you to manage, access, and share metadata about 
-> your data stored in AWS. It acts as a metadata store for your data assets, 
-> enabling various AWS services and your applications to query and connect 
-> to the data they need efficiently.
-
-See a [usage example](/docs/integrations/document_loaders/glue_catalog).
-
-```python
-from langchain_community.document_loaders.glue_catalog import GlueCatalogLoader
-```
-
-## Vector stores
-
-### Amazon OpenSearch Service
-
-> [Amazon OpenSearch Service](https://aws.amazon.com/opensearch-service/) performs 
-> interactive log analytics, real-time application monitoring, website search, and more. `OpenSearch` is 
-> an open source, 
-> distributed search and analytics suite derived from `Elasticsearch`. `Amazon OpenSearch Service` offers the 
-> latest versions of `OpenSearch`, support for many versions of `Elasticsearch`, as well as 
-> visualization capabilities powered by `OpenSearch Dashboards` and `Kibana`.
-
-We need to install several Python libraries.
-
-```bash
-pip install boto3 requests requests-aws4auth
-```
-
-See a [usage example](/docs/integrations/vectorstores/opensearch#using-aos-amazon-opensearch-service).
-
-```python
-from langchain_community.vectorstores import OpenSearchVectorSearch
-```
-
-### Amazon DocumentDB Vector Search
-
->[Amazon DocumentDB (with MongoDB Compatibility)](https://docs.aws.amazon.com/documentdb/) makes it easy to set up, operate, and scale MongoDB-compatible databases in the cloud.
-> With Amazon DocumentDB, you can run the same application code and use the same drivers and tools that you use with MongoDB.
-> Vector search for Amazon DocumentDB combines the flexibility and rich querying capability of a JSON-based document database with the power of vector search.
-
-#### Installation and Setup
-
-See [detail configuration instructions](/docs/integrations/vectorstores/documentdb).
-
-We need to install the `pymongo` Python package.
-
-```bash
-pip install pymongo
-```
-
-#### Deploy DocumentDB on AWS
-
-[Amazon DocumentDB (with MongoDB Compatibility)](https://docs.aws.amazon.com/documentdb/) is a fast, reliable, and fully managed database service. Amazon DocumentDB makes it easy to set up, operate, and scale MongoDB-compatible databases in the cloud.
-
-AWS offers services for computing, databases, storage, analytics, and other functionality. For an overview of all AWS services, see [Cloud Computing with Amazon Web Services](https://aws.amazon.com/what-is-aws/).
-
-See a [usage example](/docs/integrations/vectorstores/documentdb).
-
-```python
-from langchain_community.vectorstores import DocumentDBVectorSearch
-```
-### Amazon MemoryDB 
-[Amazon MemoryDB](https://aws.amazon.com/memorydb/) is a durable, in-memory database service that delivers ultra-fast performance. MemoryDB is compatible with Redis OSS, a popular open source data store, 
-enabling you to quickly build applications using the same flexible and friendly Redis OSS APIs and commands that they already use today. 
-
-The `InMemoryVectorStore` class provides a vector store backed by Amazon MemoryDB.
-
-```python
-from langchain_aws.vectorstores.inmemorydb import InMemoryVectorStore
-
-vds = InMemoryVectorStore.from_documents(
-            chunks,
-            embeddings,
-            redis_url="rediss://cluster_endpoint:6379/ssl=True ssl_cert_reqs=none",
-            vector_schema=vector_schema,
-            index_name=INDEX_NAME,
-        )
-```
-See a [usage example](/docs/integrations/vectorstores/memorydb).
-
-## Retrievers
-
-### Amazon Kendra
-
-> [Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/what-is-kendra.html) is an intelligent search service 
-> provided by `Amazon Web Services` (`AWS`). It utilizes advanced natural language processing (NLP) and machine 
-> learning algorithms to enable powerful search capabilities across various data sources within an organization. 
-> `Kendra` is designed to help users find the information they need quickly and accurately, 
-> improving productivity and decision-making.
-
-> With `Kendra`, we can search across a wide range of content types, including documents, FAQs, knowledge bases, 
-> manuals, and websites. It supports multiple languages and can understand complex queries, synonyms, and 
-> contextual meanings to provide highly relevant search results.
-
-We need to install the `langchain-aws` library.
-
-```bash
-pip install langchain-aws
-```
-
-See a [usage example](/docs/integrations/retrievers/amazon_kendra_retriever).
-
-```python
-from langchain_aws import AmazonKendraRetriever
-```
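-
-A minimal sketch, assuming AWS credentials are configured; the index ID is a placeholder:
-
-```python
-from langchain_aws import AmazonKendraRetriever
-
-retriever = AmazonKendraRetriever(index_id="00000000-0000-0000-0000-000000000000")  # placeholder index ID
-docs = retriever.invoke("What is our refund policy?")
-```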
-
-### Amazon Bedrock (Knowledge Bases)
-
-> [Knowledge bases for Amazon Bedrock](https://aws.amazon.com/bedrock/knowledge-bases/) is an 
-> `Amazon Web Services` (`AWS`) offering which lets you quickly build RAG applications by using your 
-> private data to customize foundation model response.
-
-We need to install the `langchain-aws` library.
-
-```bash
-pip install langchain-aws
-```
-
-See a [usage example](/docs/integrations/retrievers/bedrock).
-
-```python
-from langchain_aws import AmazonKnowledgeBasesRetriever
-```
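-
-A rough sketch with a placeholder knowledge base ID; the retrieval configuration shown is optional:
-
-```python
-from langchain_aws import AmazonKnowledgeBasesRetriever
-
-retriever = AmazonKnowledgeBasesRetriever(
-    knowledge_base_id="ABCDEFGHIJ",  # placeholder knowledge base ID
-    retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 4}},
-)
-docs = retriever.invoke("How do I rotate my credentials?")
-```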
-
-## Tools
-
-### AWS Lambda
-
->[`Amazon AWS Lambda`](https://aws.amazon.com/pm/lambda/) is a serverless computing service provided by 
-> `Amazon Web Services` (`AWS`). It helps developers to build and run applications and services without 
-> provisioning or managing servers. This serverless architecture enables you to focus on writing and 
-> deploying code, while AWS automatically takes care of scaling, patching, and managing the 
-> infrastructure required to run your applications.
-
-We need to install the `boto3` Python library.
-
-```bash
-pip install boto3
-```
-
-See a [usage example](/docs/integrations/tools/awslambda).
-
-## Memory
-
-### AWS DynamoDB
-
->[AWS DynamoDB](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dynamodb/index.html) 
-> is a fully managed `NoSQL` database service that provides fast and predictable performance with seamless scalability.
- 
-We have to configure the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). 
-
-We need to install the `boto3` library.
-
-```bash
-pip install boto3
-```
-
-See a [usage example](/docs/integrations/memory/aws_dynamodb).
-
-```python
-from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
-```
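-
-A minimal sketch, assuming a DynamoDB table named `SessionTable` with a `SessionId` partition key already exists:
-
-```python
-from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
-
-history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="user-123")
-history.add_user_message("Hi there!")
-history.add_ai_message("Hello! How can I help you today?")
-print(history.messages)
-```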
-
-## Graphs
-
-### Amazon Neptune
-
->[Amazon Neptune](https://aws.amazon.com/neptune/)
-> is a high-performance graph analytics and serverless database for superior scalability and availability.
-
-For the Cypher and SPARQL integrations below, we need to install the `langchain-aws` library.
-
-```bash
-pip install langchain-aws
-```
-
-### Amazon Neptune with Cypher
-
-See a [usage example](/docs/integrations/graphs/amazon_neptune_open_cypher).
-
-```python
-from langchain_aws.graphs import NeptuneGraph
-from langchain_aws.graphs import NeptuneAnalyticsGraph
-from langchain_aws.chains import create_neptune_opencypher_qa_chain
-```
-
-### Amazon Neptune with SPARQL
-
-See a [usage example](/docs/integrations/graphs/amazon_neptune_sparql).
-
-```python
-from langchain_aws.graphs import NeptuneRdfGraph
-from langchain_aws.chains import create_neptune_sparql_qa_chain
-```
-
-
-
-## Callbacks
-
-### Bedrock token usage
-
-```python
-from langchain_community.callbacks.bedrock_anthropic_callback import BedrockAnthropicTokenUsageCallbackHandler
-```
-
-### SageMaker Tracking
-
->[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed service that is used to quickly 
-> and easily build, train and deploy machine learning (ML) models.
-
->[Amazon SageMaker Experiments](https://docs.aws.amazon.com/sagemaker/latest/dg/experiments.html) is a capability 
-> of `Amazon SageMaker` that lets you organize, track, 
-> compare and evaluate ML experiments and model versions.
- 
-We need to install several Python libraries.
-
-```bash
-pip install google-search-results sagemaker
-```
-
-See a [usage example](/docs/integrations/callbacks/sagemaker_tracking).
-
-```python
-from langchain_community.callbacks import SageMakerCallbackHandler
-```
-
-## Chains
-
-### Amazon Comprehend Moderation Chain
-
->[Amazon Comprehend](https://aws.amazon.com/comprehend/) is a natural-language processing (NLP) service that 
-> uses machine learning to uncover valuable insights and connections in text.
-
-
-We need to install the `boto3` and `nltk` libraries.
-
-```bash
-pip install boto3 nltk
-```
-
-See a [usage example](https://python.langchain.com/v0.1/docs/guides/productionization/safety/amazon_comprehend_chain/).
-
-```python
-from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
-```
diff --git a/langchain_md_files/integrations/providers/azlyrics.mdx b/langchain_md_files/integrations/providers/azlyrics.mdx
deleted file mode 100644
index 78cbbc329d62df6e6f786a4e82d895ba81a9fadf..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/azlyrics.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# AZLyrics
-
->[AZLyrics](https://www.azlyrics.com/) is a large, legal, and ever-growing collection of lyrics.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/azlyrics).
-
-```python
-from langchain_community.document_loaders import AZLyricsLoader
-```
diff --git a/langchain_md_files/integrations/providers/azure_ai.mdx b/langchain_md_files/integrations/providers/azure_ai.mdx
deleted file mode 100644
index dd126d55e4164cde8dfa296b77e18c530c5746c6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/azure_ai.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
-# Azure AI
-
-All functionality related to [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/developer/python/get-started) and its related projects.
-
-Integration packages for Azure AI, Dynamic Sessions, and SQL Server are maintained in 
-the [langchain-azure](https://github.com/langchain-ai/langchain-azure) repository.
-
-## Chat models
-
-We recommend developers start with the `langchain-azure-ai` package to access all the models available in [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/model-catalog-overview). 
-
-### Azure AI Chat Completions Model 
-
-Access models like Azure OpenAI, DeepSeek R1, Cohere, Phi and Mistral using the `AzureAIChatCompletionsModel` class.
-
-```bash
-pip install -U langchain-azure-ai
-```
-
-Configure your API key and Endpoint.
-
-```bash
-export AZURE_INFERENCE_CREDENTIAL=your-api-key
-export AZURE_INFERENCE_ENDPOINT=your-endpoint
-```
-
-```python
-from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel
-
-llm = AzureAIChatCompletionsModel(
-    model_name="gpt-4o",
-    api_version="2024-05-01-preview",
-)
-
-llm.invoke('Tell me a joke and include some emojis')
-```
-
-## Embedding models
-
-### Azure AI model inference for embeddings
-
-```bash
-pip install -U langchain-azure-ai
-```
-
-Configure your API key and Endpoint.
-
-```bash
-export AZURE_INFERENCE_CREDENTIAL=your-api-key
-export AZURE_INFERENCE_ENDPOINT=your-endpoint
-```
-
-```python
-from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel
-
-embed_model = AzureAIEmbeddingsModel(
-    model_name="text-embedding-ada-002"
-)
-```
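-
-Continuing the snippet above, the standard LangChain embedding methods can then be called; the input strings are placeholders:
-
-```python
-# Embed a single query and a small batch of documents.
-query_vector = embed_model.embed_query("What is Azure AI Foundry?")
-doc_vectors = embed_model.embed_documents(["First document.", "Second document."])
-```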
diff --git a/langchain_md_files/integrations/providers/baai.mdx b/langchain_md_files/integrations/providers/baai.mdx
deleted file mode 100644
index 58ff1152ef8ef0b79ea455be9ede81ef1ca4e82c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/baai.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
-# BAAI
-
->[Beijing Academy of Artificial Intelligence (BAAI) (Wikipedia)](https://en.wikipedia.org/wiki/Beijing_Academy_of_Artificial_Intelligence), 
-> also known as `Zhiyuan Institute`, is a Chinese non-profit artificial 
-> intelligence (AI) research laboratory. `BAAI` conducts AI research 
-> and is dedicated to promoting collaboration among academia and industry, 
-> as well as fostering top talent and a focus on long-term research on 
-> the fundamentals of AI technology. As a collaborative hub, BAAI's founding 
-> members include leading AI companies, universities, and research institutes.
-
-
-## Embedding Models
-
-### HuggingFaceBgeEmbeddings
-
->[BGE models on the HuggingFace](https://huggingface.co/BAAI/bge-large-en-v1.5) 
-> are one of [the best open-source embedding models](https://huggingface.co/spaces/mteb/leaderboard).
-
-See a [usage example](/docs/integrations/text_embedding/bge_huggingface).
-
-```python
-from langchain_community.embeddings import HuggingFaceBgeEmbeddings
-```
-
-### IpexLLMBgeEmbeddings
-
->[IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch 
-> library for running LLMs on Intel CPUs and GPUs (e.g., a local PC with an iGPU, 
-> or discrete GPUs such as Arc, Flex, and Max) with very low latency.
-
-See a [usage example running a model on an Intel CPU](/docs/integrations/text_embedding/ipex_llm).
-See a [usage example running a model on an Intel GPU](/docs/integrations/text_embedding/ipex_llm_gpu).
-
-```python
-from langchain_community.embeddings import IpexLLMBgeEmbeddings
-```
-
-### QuantizedBgeEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/itrex).
-
-```python
-from langchain_community.embeddings import QuantizedBgeEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/bagel.mdx b/langchain_md_files/integrations/providers/bagel.mdx
deleted file mode 100644
index d76aeff4b60a5c63babd1a0ab51d531088fcc16a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bagel.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Bagel
-
-> [Bagel](https://www.bagel.net/) (`Open Vector Database for AI`) is like GitHub for AI data.
-> It is a collaborative platform where users can create,
-> share, and manage vector datasets. It can support private projects for independent developers,
-> internal collaborations for enterprises, and public contributions for data DAOs.
-
-## Installation and Setup
-
-```bash
-pip install bagelML
-```
-
-
-## VectorStore
-
-See a [usage example](/docs/integrations/vectorstores/bagel).
-
-```python
-from langchain_community.vectorstores import Bagel
-```
diff --git a/langchain_md_files/integrations/providers/bageldb.mdx b/langchain_md_files/integrations/providers/bageldb.mdx
deleted file mode 100644
index dc9a8ea708ffdf5750921ad30a05b2663ebead27..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bageldb.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# BagelDB
-
-> [BagelDB](https://www.bageldb.ai/) (`Open Vector Database for AI`) is like GitHub for AI data.
-> It is a collaborative platform where users can create,
-> share, and manage vector datasets. It can support private projects for independent developers,
-> internal collaborations for enterprises, and public contributions for data DAOs.
-
-## Installation and Setup
-
-```bash
-pip install betabageldb
-```
-
-
-## VectorStore
-
-See a [usage example](/docs/integrations/vectorstores/bageldb).
-
-```python
-from langchain_community.vectorstores import Bagel
-```
diff --git a/langchain_md_files/integrations/providers/baichuan.mdx b/langchain_md_files/integrations/providers/baichuan.mdx
deleted file mode 100644
index 409a66d6f8c6706dcff04e401d8b0b0a848b4372..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/baichuan.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Baichuan
-
->[Baichuan Inc.](https://www.baichuan-ai.com/) is a Chinese startup in the era of AGI, 
-> dedicated to addressing fundamental human needs: Efficiency, Health, and Happiness.
-
-
-## Installation and Setup
-
-Register and get an API key [here](https://platform.baichuan-ai.com/).
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/baichuan).
-
-```python
-from langchain_community.llms import BaichuanLLM
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/baichuan).
-
-```python
-from langchain_community.chat_models import ChatBaichuan
-```
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/baichuan).
-
-```python
-from langchain_community.embeddings import BaichuanTextEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/baidu.mdx b/langchain_md_files/integrations/providers/baidu.mdx
deleted file mode 100644
index 67ed5efe40916601f228c3857dbae651408780bb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/baidu.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
-# Baidu
-
->[Baidu Cloud](https://cloud.baidu.com/) is a cloud service provided by `Baidu, Inc.`, 
-> headquartered in Beijing. It offers a cloud storage service, client software, 
-> file management, resource sharing, and Third Party Integration.
-
-
-## Installation and Setup
-
-Register and get the `Qianfan` `AK` and `SK` keys [here](https://cloud.baidu.com/product/wenxinworkshop).
-
-## LLMs
-
-### Baidu Qianfan
-
-See a [usage example](/docs/integrations/llms/baidu_qianfan_endpoint).
-
-```python
-from langchain_community.llms import QianfanLLMEndpoint
-```
-
-## Chat models
-
-### Qianfan Chat Endpoint
-
-See a [usage example](/docs/integrations/chat/baidu_qianfan_endpoint).
-See another [usage example](/docs/integrations/chat/ernie).
-
-```python
-from langchain_community.chat_models import QianfanChatEndpoint
-```
-
-## Embedding models
-
-### Baidu Qianfan
-
-See a [usage example](/docs/integrations/text_embedding/baidu_qianfan_endpoint).
-See another [usage example](/docs/integrations/text_embedding/ernie).
-
-```python
-from langchain_community.embeddings import QianfanEmbeddingsEndpoint
-```
-
-## Document loaders
-
-### Baidu BOS Directory Loader
-
-```python
-from langchain_community.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader
-```
-
-### Baidu BOS File Loader
-
-```python
-from langchain_community.document_loaders.baiducloud_bos_file import BaiduBOSFileLoader
-```
-
-## Vector stores
-
-### Baidu Cloud ElasticSearch VectorSearch
-
-See a [usage example](/docs/integrations/vectorstores/baiducloud_vector_search).
-
-```python
-from langchain_community.vectorstores import BESVectorStore
-```
-
-### Baidu VectorDB
-
-See a [usage example](/docs/integrations/vectorstores/baiduvectordb).
-
-```python
-from langchain_community.vectorstores import BaiduVectorDB
-```
diff --git a/langchain_md_files/integrations/providers/bananadev.mdx b/langchain_md_files/integrations/providers/bananadev.mdx
deleted file mode 100644
index 9972bc159a873e7f05f02a9c63debb960150832c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bananadev.mdx
+++ /dev/null
@@ -1,68 +0,0 @@
-# Banana
-
->[Banana](https://www.banana.dev/) provided serverless GPU inference for AI models, 
-> a CI/CD build pipeline, and a simple Python framework (`Potassium`) to serve your models.
-
-This page covers how to use the [Banana](https://www.banana.dev) ecosystem within LangChain.
-
-## Installation and Setup
-
-- Install the Python package `banana-dev`:
-
-```bash
-pip install banana-dev
-```
-
-- Get a Banana API key from the [Banana.dev dashboard](https://app.banana.dev) and set it as an environment variable (`BANANA_API_KEY`).
-- Get your model's key and url slug from the model's details page.
-
-## Define your Banana Template
-
-You'll need to set up a GitHub repo for your Banana app. You can get started in 5 minutes using [this guide](https://docs.banana.dev/banana-docs/).
-
-Alternatively, for a ready-to-go LLM example, you can check out Banana's [CodeLlama-7B-Instruct-GPTQ](https://github.com/bananaml/demo-codellama-7b-instruct-gptq) GitHub repository. Just fork it and deploy it within Banana.
-
-Other starter repos are available [here](https://github.com/orgs/bananaml/repositories?q=demo-&type=all&language=&sort=).
-
-## Build the Banana app
-
-To use Banana apps within LangChain, you must include the `outputs` key 
-in the returned JSON, and the value must be a string.
-
-```python
-# Return the results as a dictionary
-result = {'outputs': result}
-```
-
-An example inference function would be:
-
-```python
-@app.handler("/")
-def handler(context: dict, request: Request) -> Response:
-    """Handle a request to generate code from a prompt."""
-    model = context.get("model")
-    tokenizer = context.get("tokenizer")
-    max_new_tokens = request.json.get("max_new_tokens", 512)
-    temperature = request.json.get("temperature", 0.7)
-    prompt = request.json.get("prompt")
-    prompt_template=f'''[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:
-    {prompt}
-    [/INST]
-    '''
-    input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
-    output = model.generate(inputs=input_ids, temperature=temperature, max_new_tokens=max_new_tokens)
-    result = tokenizer.decode(output[0])
-    return Response(json={"outputs": result}, status=200)
-```
-
-This example is from the `app.py` file in [CodeLlama-7B-Instruct-GPTQ](https://github.com/bananaml/demo-codellama-7b-instruct-gptq).
-
-
-## LLM
-
-
-```python
-from langchain_community.llms import Banana
-```
-
-See a [usage example](/docs/integrations/llms/banana).
diff --git a/langchain_md_files/integrations/providers/beam.mdx b/langchain_md_files/integrations/providers/beam.mdx
deleted file mode 100644
index 7f723eb0decc4f9cdbd568e9ebbd336ebab18c58..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/beam.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
-# Beam
-
->[Beam](https://www.beam.cloud/) is a cloud computing platform that allows you to run your code 
-> on remote servers with GPUs.
-
-
-## Installation and Setup
-
-- [Create an account](https://www.beam.cloud/)
-- Install the Beam CLI with `curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh`
-- Register API keys with `beam configure`
-- Set environment variables (`BEAM_CLIENT_ID`) and (`BEAM_CLIENT_SECRET`)
-- Install the Beam SDK:
-
-```bash
-pip install beam-sdk
-```
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/beam).
-
-See another example in the [Beam documentation](https://docs.beam.cloud/examples/langchain).
-
-```python
-from langchain_community.llms.beam import Beam
-```
diff --git a/langchain_md_files/integrations/providers/beautiful_soup.mdx b/langchain_md_files/integrations/providers/beautiful_soup.mdx
deleted file mode 100644
index 289d4059fab015854c2f5976f433e054c291f3be..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/beautiful_soup.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Beautiful Soup
-
->[Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/) is a Python package for parsing 
-> HTML and XML documents (including those with malformed markup, i.e. non-closed tags, so named after tag soup). 
-> It creates a parse tree for parsed pages that can be used to extract data from HTML, which 
-> is useful for web scraping.
-
-## Installation and Setup
-
-```bash
-pip install beautifulsoup4
-```
-
-## Document Transformer
-
-See a [usage example](/docs/integrations/document_transformers/beautiful_soup).
-
-```python
-from langchain_community.document_transformers import BeautifulSoupTransformer
-```
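-
-A rough sketch of pairing the transformer with an HTML loader; the loader choice, URL, and tag list are illustrative:
-
-```python
-from langchain_community.document_loaders import AsyncHtmlLoader
-from langchain_community.document_transformers import BeautifulSoupTransformer
-
-# Load raw HTML (placeholder URL), then keep only text from the selected tags.
-docs = AsyncHtmlLoader(["https://example.com"]).load()
-transformed = BeautifulSoupTransformer().transform_documents(docs, tags_to_extract=["p", "li", "a"])
-```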
diff --git a/langchain_md_files/integrations/providers/bibtex.mdx b/langchain_md_files/integrations/providers/bibtex.mdx
deleted file mode 100644
index 09cc2fd93d17503ceb595cf608c90a9297f0fe2f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bibtex.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# BibTeX
-
->[BibTeX](https://www.ctan.org/pkg/bibtex) is a file format and reference management system commonly used in conjunction with `LaTeX` typesetting. It serves as a way to organize and store bibliographic information for academic and research documents.
-
-## Installation and Setup
-
-We have to install the `bibtexparser` and `pymupdf` packages.
-
-```bash
-pip install bibtexparser pymupdf
-```
-
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/bibtex).
-
-```python
-from langchain_community.document_loaders import BibtexLoader
-```
diff --git a/langchain_md_files/integrations/providers/bilibili.mdx b/langchain_md_files/integrations/providers/bilibili.mdx
deleted file mode 100644
index ec497ec509d11993951aeab9d9eed662b5166199..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bilibili.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# BiliBili
-
->[Bilibili](https://www.bilibili.tv/) is one of the most beloved long-form video sites in China.
-
-## Installation and Setup
-
-```bash
-pip install bilibili-api-python
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/bilibili).
-
-```python
-from langchain_community.document_loaders import BiliBiliLoader
-```
diff --git a/langchain_md_files/integrations/providers/bittensor.mdx b/langchain_md_files/integrations/providers/bittensor.mdx
deleted file mode 100644
index 137069077dbdc064495499b9abefd4a203768722..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bittensor.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Bittensor
-
->The [Neural Internet Bittensor](https://neuralinternet.ai/) network is an open-source protocol 
-> that powers a decentralized, blockchain-based machine learning network.
-
-## Installation and Setup
-
-Get your API_KEY from [Neural Internet](https://neuralinternet.ai/).
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/bittensor).
-
-```python
-from langchain_community.llms import NIBittensorLLM
-```
diff --git a/langchain_md_files/integrations/providers/blackboard.mdx b/langchain_md_files/integrations/providers/blackboard.mdx
deleted file mode 100644
index 09312bc4dfa06b0fb7d189a8b25a2af9f03775ac..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/blackboard.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Blackboard
-
->[Blackboard Learn](https://en.wikipedia.org/wiki/Blackboard_Learn) (previously the `Blackboard Learning Management System`)
-> is a web-based virtual learning environment and learning management system developed by Blackboard Inc. 
-> The software features course management, customizable open architecture, and scalable design that allows 
-> integration with student information systems and authentication protocols. It may be installed on local servers, 
-> hosted by `Blackboard ASP Solutions`, or provided as Software as a Service hosted on Amazon Web Services. 
-> Its main purposes are stated to include the addition of online elements to courses traditionally delivered 
-> face-to-face and development of completely online courses with few or no face-to-face meetings.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/blackboard).
-
-```python
-from langchain_community.document_loaders import BlackboardLoader
-
-```
diff --git a/langchain_md_files/integrations/providers/bookendai.mdx b/langchain_md_files/integrations/providers/bookendai.mdx
deleted file mode 100644
index e5eecde38d7d003dcd96e4d1df9515ce79d02682..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/bookendai.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# bookend.ai
-
-LangChain implements an integration with embeddings provided by [bookend.ai](https://bookend.ai/).
-
-
-## Installation and Setup
-
-
-You need to register and get the `API_KEY` 
-from the [bookend.ai](https://bookend.ai/) website.
-
-## Embedding model
-
-See a [usage example](/docs/integrations/text_embedding/bookend).
-
-```python
-from langchain_community.embeddings import BookendEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/box.mdx b/langchain_md_files/integrations/providers/box.mdx
deleted file mode 100644
index 85ffc1a79e9e6085562cbe958ba54b69abef4d8a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/box.mdx
+++ /dev/null
@@ -1,190 +0,0 @@
-# Box
-
-[Box](https://box.com) is the Intelligent Content Cloud, a single platform that enables 
-organizations to fuel collaboration, manage the entire content lifecycle, secure critical content, 
-and transform business workflows with enterprise AI. Founded in 2005, Box simplifies work for 
-leading global organizations, including AstraZeneca, JLL, Morgan Stanley, and Nationwide.
-
-In this package, we make available a number of ways to include Box content in your AI workflows. 
-
-### Installation and setup
-
-```bash
-pip install -U langchain-box
-
-```
-
-# langchain-box
-
-This package contains the LangChain integration with Box. For more information about
-Box, check out our [developer documentation](https://developer.box.com).
-
-## Pre-requisites
-
-In order to integrate with Box, you need a few things:
-
-* A Box instance — if you are not a current Box customer, sign up for a 
-[free dev account](https://account.box.com/signup/n/developer#ty9l3).
-* A Box app — more on how to 
-[create an app](https://developer.box.com/guides/getting-started/first-application/).
-* Your app approved in your Box instance — this is done by your admin.
-The good news is that if you are using a free developer account, you are the admin.
-[Authorize your app](https://developer.box.com/guides/authorization/custom-app-approval/#manual-approval).
-
-## Authentication
-
-The `langchain-box` package offers some flexibility in authentication. The
-most basic authentication method is by using a developer token. This can be
-found in the [Box developer console](https://account.box.com/developers/console) 
-on the configuration screen. This token is purposely short-lived (1 hour) and is 
-intended for development. With this token, you can add it to your environment as 
-`BOX_DEVELOPER_TOKEN`, you can pass it directly to the loader, or you can use the 
-`BoxAuth` authentication helper class.
-
-We will cover passing it directly to the loader in the section below. 
-
-### BoxAuth helper class
-
-`BoxAuth` supports the following authentication methods:
-
-* Token — either a developer token or any token generated through the Box SDK
-* JWT with a service account
-* JWT with a specified user
-* CCG with a service account
-* CCG with a specified user
-
-:::note
-If using JWT authentication, you will need to download the configuration from the Box
-developer console after generating your public/private key pair. Place this file in your 
-application directory structure somewhere. You will use the path to this file when using
-the `BoxAuth` helper class.
-:::
-
-For more information, learn about how to 
-[set up a Box application](https://developer.box.com/guides/getting-started/first-application/),
-and check out the 
-[Box authentication guide](https://developer.box.com/guides/authentication/select/)
-for more about our different authentication options.
-
-Examples:
-
-**Token**
-
-```python
-from langchain_box.document_loaders import BoxLoader
-from langchain_box.utilities import BoxAuth, BoxAuthType
-
-auth = BoxAuth(
-    auth_type=BoxAuthType.TOKEN,
-    box_developer_token=box_developer_token
-)
-
-loader = BoxLoader(
-    box_auth=auth,
-    ...
-)
-```
-
-**JWT with a service account**
-
-```python
-from langchain_box.document_loaders import BoxLoader
-from langchain_box.utilities import BoxAuth, BoxAuthType
-
-auth = BoxAuth(
-    auth_type=BoxAuthType.JWT,
-    box_jwt_path=box_jwt_path
-)
-
-loader = BoxLoader(
-    box_auth=auth,
-    ...
-)
-```
-
-**JWT with a specified user**
-
-```python
-from langchain_box.document_loaders import BoxLoader
-from langchain_box.utilities import BoxAuth, BoxAuthType
-
-auth = BoxAuth(
-    auth_type=BoxAuthType.JWT,
-    box_jwt_path=box_jwt_path,
-    box_user_id=box_user_id
-)
-
-loader = BoxLoader(
-    box_auth=auth,
-    ...
-)
-```
-
-**CCG with a service account**
-
-```python
-from langchain_box.document_loaders import BoxLoader
-from langchain_box.utilities import BoxAuth, BoxAuthType
-
-auth = BoxAuth(
-    auth_type=BoxAuthType.CCG,
-    box_client_id=box_client_id,
-    box_client_secret=box_client_secret,
-    box_enterprise_id=box_enterprise_id
-)
-
-loader = BoxLoader(
-    box_auth=auth,
-    ...
-)
-```
-
-**CCG with a specified user**
-
-```python
-from langchain_box.document_loaders import BoxLoader
-from langchain_box.utilities import BoxAuth, BoxAuthType
-
-auth = BoxAuth(
-    auth_type=BoxAuthType.CCG,
-    box_client_id=box_client_id,
-    box_client_secret=box_client_secret,
-    box_user_id=box_user_id
-)
-
-loader = BoxLoader(
-    box_auth=auth,
-    ...
-)
-```
-
-If you wish to use OAuth2 with the authorization_code flow, please use `BoxAuthType.TOKEN` with the token you have acquired.
-
-## Document Loaders
-
-### BoxLoader
-
-[See usage example](/docs/integrations/document_loaders/box)
-
-```python
-from langchain_box.document_loaders import BoxLoader
-
-```
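-
-A minimal loading sketch, assuming a developer token and a folder ID (both placeholders here); `BoxLoader` can also be configured with the `BoxAuth` helper shown above:
-
-```python
-from langchain_box.document_loaders import BoxLoader
-
-loader = BoxLoader(
-    box_developer_token="<your_developer_token>",  # placeholder
-    box_folder_id="<your_folder_id>",  # placeholder
-)
-docs = loader.load()
-```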
-
-## Retrievers
-
-### BoxRetriever
-
-[See usage example](/docs/integrations/retrievers/box)
-
-```python
-from langchain_box.retrievers import BoxRetriever
-
-```
-
-## Blob Loaders
-
-### BoxBlobLoader
-
-[See usage example](/docs/integrations/document_loaders/box)
-
-```python
-from langchain_box.blob_loaders import BoxBlobLoader
-
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/brave_search.mdx b/langchain_md_files/integrations/providers/brave_search.mdx
deleted file mode 100644
index 647004302cc4054ce9835a060789171622f3eafb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/brave_search.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Brave Search
-
-
->[Brave Search](https://en.wikipedia.org/wiki/Brave_Search) is a search engine developed by Brave Software.
-> - `Brave Search` uses its own web index. As of May 2022, it covered over 10 billion pages and was used to serve 92% 
-> of search results without relying on any third parties, with the remainder being retrieved 
-> server-side from the Bing API or (on an opt-in basis) client-side from Google. According 
-> to Brave, the index was kept "intentionally smaller than that of Google or Bing" in order to 
-> help avoid spam and other low-quality content, with the disadvantage that "Brave Search is 
-> not yet as good as Google in recovering long-tail queries."
->- `Brave Search Premium`: As of April 2023 Brave Search is an ad-free website, but it will 
-> eventually switch to a new model that will include ads and premium users will get an ad-free experience.
-> User data including IP addresses won't be collected from its users by default. A premium account 
-> will be required for opt-in data-collection.
-
-
-## Installation and Setup
-
-To get access to the Brave Search API, you need to [create an account and get an API key](https://api.search.brave.com/app/dashboard).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/brave_search).
-
-```python
-from langchain_community.document_loaders import BraveSearchLoader
-```
-
-## Tool
-
-See a [usage example](/docs/integrations/tools/brave_search).
-
-```python
-from langchain.tools import BraveSearch
-```
diff --git a/langchain_md_files/integrations/providers/browserbase.mdx b/langchain_md_files/integrations/providers/browserbase.mdx
deleted file mode 100644
index 0bd939ffbfc66e75a92286cdbad2d0079a3ef354..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/browserbase.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# Browserbase
-
-[Browserbase](https://browserbase.com) is a developer platform to reliably run, manage, and monitor headless browsers.
-
-Power your AI data retrievals with:
-- [Serverless Infrastructure](https://docs.browserbase.com/under-the-hood) providing reliable browsers to extract data from complex UIs
-- [Stealth Mode](https://docs.browserbase.com/features/stealth-mode) with included fingerprinting tactics and automatic captcha solving
-- [Session Debugger](https://docs.browserbase.com/features/sessions) to inspect your Browser Session with network timeline and logs
-- [Live Debug](https://docs.browserbase.com/guides/session-debug-connection/browser-remote-control) to quickly debug your automation
-
-## Installation and Setup
-
-- Get an API key and Project ID from [browserbase.com](https://browserbase.com) and set them in environment variables (`BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID`).
-- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):
-
-```bash
-pip install browserbase
-```
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/browserbase).
-
-```python
-from langchain_community.document_loaders import BrowserbaseLoader
-```
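-
-A minimal loading sketch; the URL is a placeholder and it assumes `BROWSERBASE_API_KEY` and `BROWSERBASE_PROJECT_ID` are set in the environment:
-
-```python
-from langchain_community.document_loaders import BrowserbaseLoader
-
-loader = BrowserbaseLoader(
-    urls=["https://example.com"],  # placeholder URL
-    text_content=True,  # return plain text instead of raw HTML
-)
-docs = loader.load()
-```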
-
-## Multi-Modal
-
-See a [usage example](/docs/integrations/document_loaders/browserbase).
-
-```python
-from browserbase.helpers.gpt4 import GPT4VImage, GPT4VImageDetail
-```
diff --git a/langchain_md_files/integrations/providers/browserless.mdx b/langchain_md_files/integrations/providers/browserless.mdx
deleted file mode 100644
index 0fe4463af921cd716b26c77a85e101a8e30ed1ae..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/browserless.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# Browserless
-
->[Browserless](https://www.browserless.io/docs/start) is a service that allows you to 
-> run headless Chrome instances in the cloud. It’s a great way to run browser-based 
-> automation at scale without having to worry about managing your own infrastructure.
-
-## Installation and Setup
-
-We have to get the API key [here](https://www.browserless.io/pricing/).
-
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/browserless).
-
-```python
-from langchain_community.document_loaders import BrowserlessLoader
-```
diff --git a/langchain_md_files/integrations/providers/byte_dance.mdx b/langchain_md_files/integrations/providers/byte_dance.mdx
deleted file mode 100644
index 25f6e2533ef6263dc0c87f650c509f4f5acd8504..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/byte_dance.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# ByteDance
-
->[ByteDance](https://bytedance.com/) is a Chinese internet technology company.
-
-## Installation and Setup
-
-Get the access token.
-You can find the access instructions [here](https://open.larksuite.com/document).
-
-
-## Document Loaders
-
->[Lark Suite](https://www.larksuite.com/) is an enterprise collaboration platform 
-> developed by `ByteDance`.
-
-### Lark Suite for Document
-
-See a [usage example](/docs/integrations/document_loaders/larksuite/#load-from-document).
-
-```python
-from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader
-```
-
-### Lark Suite for Wiki
-
-See a [usage example](/docs/integrations/document_loaders/larksuite/#load-from-wiki).
-
-```python
-from langchain_community.document_loaders.larksuite import LarkSuiteWikiLoader
-```
diff --git a/langchain_md_files/integrations/providers/cassandra.mdx b/langchain_md_files/integrations/providers/cassandra.mdx
deleted file mode 100644
index 87cf5ebe0c92891bae1d12984cc4ee5fa9940424..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cassandra.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
-# Cassandra
-
-> [Apache Cassandra®](https://cassandra.apache.org/) is a NoSQL, row-oriented, highly scalable and highly available database.
-> Starting with version 5.0, the database ships with [vector search capabilities](https://cassandra.apache.org/doc/trunk/cassandra/vector-search/overview.html).
-
-The integrations outlined in this page can be used with `Cassandra` as well as other CQL-compatible databases, 
-i.e. those using the `Cassandra Query Language` protocol.
-
-
-## Installation and Setup
-
-Install the following Python package:
-
-```bash
-pip install "cassio>=0.1.6"
-```
-
-## Vector Store
-
-```python
-from langchain_community.vectorstores import Cassandra
-```
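-
-A minimal construction sketch, assuming an existing Cassandra `session` (for example from the `cassandra-driver` package); the keyspace and table name are placeholders:
-
-```python
-from langchain_community.vectorstores import Cassandra
-
-vector_store = Cassandra(
-    embedding=my_embedding,        # any LangChain embeddings object (assumed)
-    session=session,               # an open Cassandra session (assumed)
-    keyspace="my_keyspace",        # placeholder keyspace
-    table_name="my_vector_table",  # placeholder table name
-)
-```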
-
-Learn more in the [example notebook](/docs/integrations/vectorstores/cassandra).
-
-## Chat message history
-
-```python
-from langchain_community.chat_message_histories import CassandraChatMessageHistory
-```
-
-Learn more in the [example notebook](/docs/integrations/memory/cassandra_chat_message_history).
-
-
-## LLM Cache
-
-```python
-from langchain.globals import set_llm_cache
-from langchain_community.cache import CassandraCache
-set_llm_cache(CassandraCache())
-```
-
-Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the Cassandra section).
-
-
-## Semantic LLM Cache
-
-```python
-from langchain.globals import set_llm_cache
-from langchain_community.cache import CassandraSemanticCache
-set_llm_cache(CassandraSemanticCache(
-    embedding=my_embedding,
-    table_name="my_store",
-))
-```
-
-Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the appropriate section).
-
-## Document loader
-
-```python
-from langchain_community.document_loaders import CassandraLoader
-```
-
-Learn more in the [example notebook](/docs/integrations/document_loaders/cassandra).
-
-#### Attribution statement
-
-> Apache Cassandra, Cassandra and Apache are either registered trademarks or trademarks of 
-> the [Apache Software Foundation](http://www.apache.org/) in the United States and/or other countries.
-
-## Toolkit
-
-The `Cassandra Database toolkit` enables AI engineers to efficiently integrate agents
-with Cassandra data.
-
-```python
-from langchain_community.agent_toolkits.cassandra_database.toolkit import (
-    CassandraDatabaseToolkit,
-)
-```
-
-Learn more in the [example notebook](/docs/integrations/tools/cassandra_database).
-
-
-Cassandra Database individual tools:
-
-### Get Schema
-
-Tool for getting the schema of a keyspace in an Apache Cassandra database.
-
-```python
-from langchain_community.tools import GetSchemaCassandraDatabaseTool
-```
-
-### Get Table Data
-
-Tool for getting data from a table in an Apache Cassandra database.
-
-```python
-from langchain_community.tools import GetTableDataCassandraDatabaseTool
-```
-
-### Query
-
-Tool for querying an Apache Cassandra database with provided CQL.
-
-```python
-from langchain_community.tools import QueryCassandraDatabaseTool
-```
diff --git a/langchain_md_files/integrations/providers/cerebras.mdx b/langchain_md_files/integrations/providers/cerebras.mdx
deleted file mode 100644
index 10201bc0240bcc7162a2f2b0d434a2d32fb689cb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cerebras.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# Cerebras
-
-At Cerebras, we've developed the world's largest and fastest AI processor, the Wafer-Scale Engine-3 (WSE-3). The Cerebras CS-3 system, powered by the WSE-3, represents a new class of AI supercomputer that sets the standard for generative AI training and inference with unparalleled performance and scalability.
-
-With Cerebras as your inference provider, you can:
-- Achieve unprecedented speed for AI inference workloads
-- Build commercially with high throughput
-- Effortlessly scale your AI workloads with our seamless clustering technology
-
-Our CS-3 systems can be quickly and easily clustered to create the largest AI supercomputers in the world, making it simple to place and run the largest models. Leading corporations, research institutions, and governments are already using Cerebras solutions to develop proprietary models and train popular open-source models.
-
-Want to experience the power of Cerebras? Check out our [website](https://cerebras.ai) for more resources and explore options for accessing our technology through the Cerebras Cloud or on-premise deployments!
-
-For more information about Cerebras Cloud, visit [cloud.cerebras.ai](https://cloud.cerebras.ai/). Our API reference is available at [inference-docs.cerebras.ai](https://inference-docs.cerebras.ai/).
-
-## Installation and Setup
-Install the integration package:
-
-```bash
-pip install langchain-cerebras
-```
-
-## API Key
-Get an API Key from [cloud.cerebras.ai](https://cloud.cerebras.ai/) and add it to your environment variables:
-```bash
-export CEREBRAS_API_KEY="your-api-key-here"
-```
-
-## Chat Model
-See a [usage example](/docs/integrations/chat/cerebras).
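-
-A minimal import sketch, assuming the `langchain-cerebras` package exposes a `ChatCerebras` chat model class; the model name is a placeholder:
-
-```python
-from langchain_cerebras import ChatCerebras
-
-llm = ChatCerebras(model="llama3.1-8b")  # placeholder model name
-```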
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/cerebriumai.mdx b/langchain_md_files/integrations/providers/cerebriumai.mdx
deleted file mode 100644
index 912dbd90f61e634307a45cf875b72e4a946e09d3..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cerebriumai.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
-# CerebriumAI
-
->[Cerebrium](https://docs.cerebrium.ai/cerebrium/getting-started/introduction)  is a serverless GPU infrastructure provider.
-> It provides API access to several LLM models.
-
-See the examples in the [CerebriumAI documentation](https://docs.cerebrium.ai/examples/langchain).
-
-## Installation and Setup
-
-- Install the Python package:
-```bash
-pip install cerebrium
-```
-
-- [Get a CerebriumAI API key](https://docs.cerebrium.ai/cerebrium/getting-started/installation) and set 
-  it as an environment variable (`CEREBRIUMAI_API_KEY`).
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/cerebriumai).
-
-
-```python
-from langchain_community.llms import CerebriumAI
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/chaindesk.mdx b/langchain_md_files/integrations/providers/chaindesk.mdx
deleted file mode 100644
index 7cfd5e96b88f3777dfcef4be86bd02f9da04e166..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/chaindesk.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Chaindesk
-
->[Chaindesk](https://chaindesk.ai) is an [open-source](https://github.com/gmpetrov/databerry) document retrieval platform that helps to connect your personal data with Large Language Models.
-
-
-## Installation and Setup
-
-We need to sign up for Chaindesk, create a datastore, add some data, and get the datastore API endpoint URL. 
-We also need the [API key](https://docs.chaindesk.ai/api-reference/authentication).
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/chaindesk).
-
-```python
-from langchain.retrievers import ChaindeskRetriever
-```
diff --git a/langchain_md_files/integrations/providers/chroma.mdx b/langchain_md_files/integrations/providers/chroma.mdx
deleted file mode 100644
index d5436c9dc2aef54dd4e0b0bec5f4f646ea0ffb67..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/chroma.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# Chroma
-
->[Chroma](https://docs.trychroma.com/getting-started) is a database for building AI applications with embeddings.
-
-## Installation and Setup
-
-```bash
-pip install langchain-chroma
-```
-
-
-## VectorStore
-
-There exists a wrapper around Chroma vector databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-```python
-from langchain_chroma import Chroma
-```
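-
-A minimal construction sketch; the collection name and persistence directory are placeholders, and `embeddings` stands in for any LangChain embeddings object:
-
-```python
-from langchain_chroma import Chroma
-
-vector_store = Chroma(
-    collection_name="example_collection",        # placeholder
-    embedding_function=embeddings,               # embeddings object (assumed)
-    persist_directory="./chroma_langchain_db",   # optional on-disk location
-)
-```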
-
-For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/docs/integrations/vectorstores/chroma).
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/self_query/chroma_self_query).
-
-```python
-from langchain.retrievers import SelfQueryRetriever
-```
diff --git a/langchain_md_files/integrations/providers/clarifai.mdx b/langchain_md_files/integrations/providers/clarifai.mdx
deleted file mode 100644
index e783833255490c1e4cbce95f018f6578baff4f42..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/clarifai.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-# Clarifai
-
->[Clarifai](https://clarifai.com) is one of the first deep learning platforms, having been founded in 2013. Clarifai provides an AI platform with the full AI lifecycle for data exploration, data labeling, model training, evaluation and inference around images, video, text and audio data. In the LangChain ecosystem, as far as we're aware, Clarifai is the only provider that supports LLMs, embeddings and a vector store in one production-scale platform, making it an excellent choice to operationalize your LangChain implementations.
->
-> `Clarifai` provides thousands of AI models for many different use cases. You can [explore them here](https://clarifai.com/explore) to find the one most suited for your use case. These models include those created by other providers such as OpenAI, Anthropic, Cohere, AI21, etc., as well as state-of-the-art open-source models such as Falcon, InstructorXL, etc., so that you can build the best AI into your products. You'll find these organized by the creator's user_id and into projects we call applications, denoted by their app_id. Those IDs will be needed in addition to the model_id and optionally the version_id, so make note of all these IDs once you have found the best model for your use case!
->
->Also note that given there are many models for images, video, text and audio understanding, you can build some interesting AI agents that utilize the variety of AI models as experts to understand those data types.
-
-
-## Installation and Setup
-- Install the Python SDK:
-```bash
-pip install clarifai
-```
-[Sign-up](https://clarifai.com/signup) for a Clarifai account, then get a personal access token to access the Clarifai API from your [security settings](https://clarifai.com/settings/security) and set it as an environment variable (`CLARIFAI_PAT`).
-
-
-## LLMs
-
-To find the selection of LLMs in the Clarifai platform you can select the text to text model type [here](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22model_type_id%22%2C%22value%22%3A%5B%22text-to-text%22%5D%7D%5D&page=1&perPage=24).
-
-```python
-from langchain_community.llms import Clarifai
-llm = Clarifai(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
-```
-
-For more details, the docs on the Clarifai LLM wrapper provide a [detailed walkthrough](/docs/integrations/llms/clarifai).
-
-
-## Embedding Models
-
-To find the selection of embeddings models in the Clarifai platform you can select the text to embedding model type [here](https://clarifai.com/explore/models?page=1&perPage=24&filterData=%5B%7B%22field%22%3A%22model_type_id%22%2C%22value%22%3A%5B%22text-embedder%22%5D%7D%5D).
-
-There is a Clarifai Embedding model in LangChain, which you can access with:
-```python
-from langchain_community.embeddings import ClarifaiEmbeddings
-embeddings = ClarifaiEmbeddings(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
-```
-
-See a [usage example](/docs/integrations/text_embedding/clarifai).
-
-
-## Vectorstore
-
-Clarifai's vector DB was launched in 2016 and has been optimized to support live search queries. With workflows in the Clarifai platform, your data is automatically indexed by an embedding model, and optionally other models as well, to index that information in the DB for search. You can query the DB not only via the vectors but also filter by metadata matches, other AI-predicted concepts, and even do geo-coordinate search. Simply create an application, select the appropriate base workflow for your type of data, and upload it (through the API as [documented here](https://docs.clarifai.com/api-guide/data/create-get-update-delete) or the UIs at clarifai.com).
-
-You can also add data directly from LangChain, and the auto-indexing will take place for you. You'll notice this is a little different than other vectorstores, where you need to provide an embedding model in their constructor and have LangChain coordinate getting the embeddings from text and writing those to the index. Not only is it more convenient, but it's much more scalable to use Clarifai's distributed cloud to do all the indexing in the background.
-
-```python
-from langchain_community.vectorstores import Clarifai
-clarifai_vector_db = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts, pat=CLARIFAI_PAT, number_of_docs=NUMBER_OF_DOCS, metadatas = metadatas)
-```
-For more details, the docs on the Clarifai vector store provide a [detailed walkthrough](/docs/integrations/vectorstores/clarifai).
diff --git a/langchain_md_files/integrations/providers/clickhouse.mdx b/langchain_md_files/integrations/providers/clickhouse.mdx
deleted file mode 100644
index 64e4608c535fde2b3733fb2f521fc7173545ed7d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/clickhouse.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# ClickHouse
-
-> [ClickHouse](https://clickhouse.com/) is a fast and resource-efficient open-source database for real-time 
-> apps and analytics with full SQL support and a wide range of functions to assist users in writing analytical queries. 
-> It has data structures and distance search functions (like `L2Distance`) as well as 
-> [approximate nearest neighbor search indexes](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes) 
-> that enable ClickHouse to be used as a high-performance and scalable vector database to store and search vectors with SQL.
-
-
-## Installation and Setup
-
-We need to install the `clickhouse-connect` Python package.
-
-```bash
-pip install clickhouse-connect
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/clickhouse).
-
-```python
-from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
-```
-
diff --git a/langchain_md_files/integrations/providers/clickup.mdx b/langchain_md_files/integrations/providers/clickup.mdx
deleted file mode 100644
index 256ae2cace4e6a151438945698676131ff0b1bb5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/clickup.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# ClickUp
-
->[ClickUp](https://clickup.com/) is an all-in-one productivity platform that provides small and large teams across industries with flexible and customizable work management solutions, tools, and functions.
->
->It is a cloud-based project management solution for businesses of all sizes featuring communication and collaboration tools to help achieve organizational goals.
-
-## Installation and Setup
-
-1. Create a [ClickUp App](https://help.clickup.com/hc/en-us/articles/6303422883095-Create-your-own-app-with-the-ClickUp-API)
-2. Follow [these steps](https://clickup.com/api/developer-portal/authentication/) to get your client_id and client_secret.
-
-## Toolkits
-
-```python
-from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
-from langchain_community.utilities.clickup import ClickupAPIWrapper
-```
-
-See a [usage example](/docs/integrations/tools/clickup).
-
diff --git a/langchain_md_files/integrations/providers/cloudflare.mdx b/langchain_md_files/integrations/providers/cloudflare.mdx
deleted file mode 100644
index d7a4e8b8bed14106ed0212b5817ea7faab193063..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cloudflare.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Cloudflare
-
->[Cloudflare, Inc. (Wikipedia)](https://en.wikipedia.org/wiki/Cloudflare) is an American company that provides 
-> content delivery network services, cloud cybersecurity, DDoS mitigation, and ICANN-accredited 
-> domain registration services.
-
->[Cloudflare Workers AI](https://developers.cloudflare.com/workers-ai/) allows you to run machine 
-> learning models, on the `Cloudflare` network, from your code via REST API.
-
-
-## LLMs
-
-See [installation instructions and usage example](/docs/integrations/llms/cloudflare_workersai).
-
-```python
-from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
-```
-
-## Embedding models
-
-See [installation instructions and usage example](/docs/integrations/text_embedding/cloudflare_workersai).
-
-```python
-from langchain_community.embeddings.cloudflare_workersai import CloudflareWorkersAIEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/clova.mdx b/langchain_md_files/integrations/providers/clova.mdx
deleted file mode 100644
index b10aa930511363c9cc98699149354734a4ebf90c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/clova.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
-# Clova
-
->[CLOVA Studio](https://api.ncloud-docs.com/docs/ai-naver-clovastudio-summary) is a service 
-> of [Naver Cloud Platform](https://www.ncloud.com/) that uses `HyperCLOVA` language models, 
-> a hyperscale AI technology, to output phrases generated through AI technology based on user input.
-
-
-## Embedding models
-
-See [installation instructions and usage example](/docs/integrations/text_embedding/clova).
-
-```python
-from langchain_community.embeddings import ClovaEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/cnosdb.mdx b/langchain_md_files/integrations/providers/cnosdb.mdx
deleted file mode 100644
index 6d65ce29cf86fd7c9291b130af5566174ff1cfa9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cnosdb.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
-# CnosDB
-> [CnosDB](https://github.com/cnosdb/cnosdb) is an open-source distributed time series database with high performance, high compression rate and high ease of use.
-
-## Installation and Setup
-
-```bash
-pip install cnos-connector
-```
-
-## Connecting to CnosDB
-You can connect to CnosDB using the `SQLDatabase.from_cnosdb()` method.
-### Syntax
-```python
-def SQLDatabase.from_cnosdb(url: str = "127.0.0.1:8902",
-                              user: str = "root",
-                              password: str = "",
-                              tenant: str = "cnosdb",
-                              database: str = "public")
-```
-Args:
-1. url (str): The HTTP connection host name and port number of the CnosDB
-                service, excluding "http://" or "https://", with a default value
-                of "127.0.0.1:8902".
-2. user (str): The username used to connect to the CnosDB service, with a
-                default value of "root".
-3. password (str): The password of the user connecting to the CnosDB service,
-                with a default value of "".
-4. tenant (str): The name of the tenant used to connect to the CnosDB service,
-                with a default value of "cnosdb".
-5. database (str): The name of the database in the CnosDB tenant.
-## Examples
-```python
-# Connecting to CnosDB with SQLDatabase Wrapper
-from langchain_community.utilities import SQLDatabase
-
-db = SQLDatabase.from_cnosdb()
-```
-```python
-# Creating an OpenAI Chat LLM wrapper
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
-```
-
-### SQL Database Chain
-This example demonstrates the use of the SQL Chain for answering a question over a CnosDB.
-```python
-from langchain_experimental.sql import SQLDatabaseChain
-
-db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
-
-db_chain.run(
-    "What is the average temperature of air at station XiaoMaiDao between October 19, 2022 and Occtober 20, 2022?"
-)
-```
-```shell
-> Entering new  chain...
-What is the average temperature of air at station XiaoMaiDao between October 19, 2022 and October 20, 2022?
-SQLQuery:SELECT AVG(temperature) FROM air WHERE station = 'XiaoMaiDao' AND time >= '2022-10-19' AND time < '2022-10-20'
-SQLResult: [(68.0,)]
-Answer:The average temperature of air at station XiaoMaiDao between October 19, 2022 and October 20, 2022 is 68.0.
-> Finished chain.
-```
-### SQL Database Agent
-This example demonstrates the use of the SQL Database Agent for answering questions over a CnosDB.
-```python
-from langchain.agents import create_sql_agent
-from langchain_community.agent_toolkits import SQLDatabaseToolkit
-
-toolkit = SQLDatabaseToolkit(db=db, llm=llm)
-agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
-```
-```python
-agent.run(
-    "What is the average temperature of air at station XiaoMaiDao between October 19, 2022 and Occtober 20, 2022?"
-)
-```
-```shell
-> Entering new  chain...
-Action: sql_db_list_tables
-Action Input: ""
-Observation: air
-Thought:The "air" table seems relevant to the question. I should query the schema of the "air" table to see what columns are available.
-Action: sql_db_schema
-Action Input: "air"
-Observation:
-CREATE TABLE air (
-	pressure FLOAT,
-	station STRING,
-	temperature FLOAT,
-	time TIMESTAMP,
-	visibility FLOAT
-)
-
-/*
-3 rows from air table:
-pressure	station	temperature	time	visibility
-75.0	XiaoMaiDao	67.0	2022-10-19T03:40:00	54.0
-77.0	XiaoMaiDao	69.0	2022-10-19T04:40:00	56.0
-76.0	XiaoMaiDao	68.0	2022-10-19T05:40:00	55.0
-*/
-Thought:The "temperature" column in the "air" table is relevant to the question. I can query the average temperature between the specified dates.
-Action: sql_db_query
-Action Input: "SELECT AVG(temperature) FROM air WHERE station = 'XiaoMaiDao' AND time >= '2022-10-19' AND time &lt;= '2022-10-20'"
-Observation: [(68.0,)]
-Thought:The average temperature of air at station XiaoMaiDao between October 19, 2022 and October 20, 2022 is 68.0.
-Final Answer: 68.0
-
-> Finished chain.
-```
diff --git a/langchain_md_files/integrations/providers/cognee.mdx b/langchain_md_files/integrations/providers/cognee.mdx
deleted file mode 100644
index 8cc40a17288b88747af31e387abb2b0a7d5104a4..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cognee.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
-# Cognee
-
-Cognee implements scalable, modular ECL (Extract, Cognify, Load) pipelines that allow
-you to interconnect and retrieve past conversations, documents, and audio
-transcriptions while reducing hallucinations, developer effort, and cost.
-
-Cognee merges graph and vector databases to uncover hidden relationships and new
-patterns in your data. You can automatically model, load and retrieve entities and
-objects representing your business domain and analyze their relationships, uncovering
-insights that neither vector stores nor graph stores alone can provide.
-
-Try it in a [Google Colab notebook](https://colab.research.google.com/drive/1g-Qnx6l_ecHZi0IOw23rg0qC4TYvEvWZ?usp=sharing) or have a look at the [documentation](https://docs.cognee.ai).
-
-If you have questions, join the cognee [Discord](https://discord.gg/NQPKmU5CCg) community.
-
-Have you seen cognee's [starter repo](https://github.com/topoteretes/cognee-starter)? Check it out!
-
-
-## Installation and Setup
-
-```bash
-pip install langchain-cognee
-```
-
-## Retrievers
-
-See detail on available retrievers [here](/docs/integrations/retrievers/cognee).
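-
-A minimal import sketch, assuming the `langchain-cognee` package exposes a `CogneeRetriever` class (check the linked page for the exact API):
-
-```python
-from langchain_cognee import CogneeRetriever
-```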
diff --git a/langchain_md_files/integrations/providers/cogniswitch.mdx b/langchain_md_files/integrations/providers/cogniswitch.mdx
deleted file mode 100644
index d8aee6a4c9d5c8ec21a32caf16eb1d62555c18e6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cogniswitch.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-# CogniSwitch
-
->[CogniSwitch](https://www.cogniswitch.ai/aboutus) is an API-based data platform that 
-> enhances enterprise data by extracting entities, concepts and their relationships, 
-> thereby converting this data into a multidimensional format and storing it in 
-> a database that can accommodate these enhancements. In our case the data is stored 
-> in a knowledge graph. This enhanced data is now ready for consumption by LLMs and 
-> other GenAI applications, ensuring the data is consumable and context can be maintained, 
-> thereby eliminating hallucinations and delivering accuracy.
-
-## Toolkit
-
-See [installation instructions and usage example](/docs/integrations/tools/cogniswitch).
-
-```python
-from langchain_community.agent_toolkits import CogniswitchToolkit
-```
-
-## Tools
-
-### CogniswitchKnowledgeRequest
-
->Tool that uses the CogniSwitch service to answer questions.
-
-```python
-from langchain_community.tools.cogniswitch.tool import CogniswitchKnowledgeRequest
-```
-
-### CogniswitchKnowledgeSourceFile
-
->Tool that uses the CogniSwitch services to store data from file.
-
-```python
-from langchain_community.tools.cogniswitch.tool import CogniswitchKnowledgeSourceFile
-```
-
-### CogniswitchKnowledgeSourceURL
-
->Tool that uses the CogniSwitch services to store data from a URL.
-
-```python
-from langchain_community.tools.cogniswitch.tool import CogniswitchKnowledgeSourceURL
-```
-
-### CogniswitchKnowledgeStatus
-
->Tool that uses the CogniSwitch services to get the status of the uploaded document or URL.
-
-```python
-from langchain_community.tools.cogniswitch.tool import CogniswitchKnowledgeStatus
-```
-
-
diff --git a/langchain_md_files/integrations/providers/cohere.mdx b/langchain_md_files/integrations/providers/cohere.mdx
deleted file mode 100644
index 0bd8417317de8f5180417fdb91a3dc1abd288004..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cohere.mdx
+++ /dev/null
@@ -1,157 +0,0 @@
-# Cohere
-
->[Cohere](https://cohere.ai/about) is a Canadian startup that provides natural language processing models
-> that help companies improve human-machine interactions.
-
-## Installation and Setup
-- Install the Python SDK:
-```bash
-pip install langchain-cohere
-```
-
-Get a [Cohere API key](https://dashboard.cohere.ai/) and set it as an environment variable (`COHERE_API_KEY`).
-
-## Cohere langchain integrations
-
-|API|description|Endpoint docs|Import|Example usage|
-|---|---|---|---|---|
-|Chat|Build chat bots|[chat](https://docs.cohere.com/reference/chat)|`from langchain_cohere import ChatCohere`|[cohere.ipynb](/docs/integrations/chat/cohere)|
-|LLM|Generate text|[generate](https://docs.cohere.com/reference/generate)|`from langchain_cohere.llms import Cohere`|[cohere.ipynb](/docs/integrations/llms/cohere)|
-|RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain.retrievers import CohereRagRetriever`|[cohere.ipynb](/docs/integrations/retrievers/cohere)|
-|Text Embedding|Embed strings to vectors|[embed](https://docs.cohere.com/reference/embed)|`from langchain_cohere import CohereEmbeddings`|[cohere.ipynb](/docs/integrations/text_embedding/cohere)|
-|Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from langchain.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/docs/integrations/retrievers/cohere-reranker)|
-
-## Quick copy examples
-
-### Chat
-
-```python
-from langchain_cohere import ChatCohere
-from langchain_core.messages import HumanMessage
-chat = ChatCohere()
-messages = [HumanMessage(content="knock knock")]
-print(chat.invoke(messages))
-```
-
-Usage of the Cohere [chat model](/docs/integrations/chat/cohere)
-
-### LLM
-
-
-```python
-from langchain_cohere.llms import Cohere
-
-llm = Cohere()
-print(llm.invoke("Come up with a pet name"))
-```
-
-Usage of the Cohere (legacy) [LLM model](/docs/integrations/llms/cohere) 
-
-### Tool calling
-```python
-from langchain_cohere import ChatCohere
-from langchain_core.messages import (
-    HumanMessage,
-    ToolMessage,
-)
-from langchain_core.tools import tool
-
-@tool
-def magic_function(number: int) -> int:
-    """Applies a magic operation to an integer
-
-    Args:
-        number: Number to have magic operation performed on
-    """
-    return number + 10
-
-def invoke_tools(tool_calls, messages):
-    for tool_call in tool_calls:
-        selected_tool = {"magic_function":magic_function}[
-            tool_call["name"].lower()
-        ]
-        tool_output = selected_tool.invoke(tool_call["args"])
-        messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
-    return messages
-
-tools = [magic_function]
-
-llm = ChatCohere()
-llm_with_tools = llm.bind_tools(tools=tools)
-messages = [
-    HumanMessage(
-        content="What is the value of magic_function(2)?"
-    )
-]
-
-res = llm_with_tools.invoke(messages)
-while res.tool_calls:
-    messages.append(res)
-    messages = invoke_tools(res.tool_calls, messages)
-    res = llm_with_tools.invoke(messages)
-
-print(res.content)
-```
-Tool calling with the Cohere LLM can be done by binding the necessary tools to the llm as seen above. 
-An alternative is to use multi-hop tool calling with the ReAct agent, as seen below.
-
-### ReAct Agent
-
-The agent is based on the paper
-[ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629).
-
-```python
-from langchain_community.tools.tavily_search import TavilySearchResults
-from langchain_cohere import ChatCohere, create_cohere_react_agent
-from langchain_core.prompts import ChatPromptTemplate
-from langchain.agents import AgentExecutor
-
-llm = ChatCohere()
-
-internet_search = TavilySearchResults(max_results=4)
-internet_search.name = "internet_search"
-internet_search.description = "Route a user query to the internet"
-
-prompt = ChatPromptTemplate.from_template("{input}")
-
-agent = create_cohere_react_agent(
-    llm,
-    [internet_search],
-    prompt
-)
-
-agent_executor = AgentExecutor(agent=agent, tools=[internet_search], verbose=True)
-
-agent_executor.invoke({
-    "input": "In what year was the company that was founded as Sound of Music added to the S&P 500?",
-})
-```
-The ReAct agent can be used to call multiple tools in sequence.
-
-### RAG Retriever
-
-```python
-from langchain_cohere import ChatCohere
-from langchain.retrievers import CohereRagRetriever
-from langchain_core.documents import Document
-
-rag = CohereRagRetriever(llm=ChatCohere())
-print(rag.invoke("What is cohere ai?"))
-```
-
-Usage of the Cohere [RAG Retriever](/docs/integrations/retrievers/cohere)
-
-### Text Embedding
-
-```python
-from langchain_cohere import CohereEmbeddings
-
-embeddings = CohereEmbeddings(model="embed-english-light-v3.0")
-print(embeddings.embed_documents(["This is a test document."]))
-```
-
-Usage of the Cohere [Text Embeddings model](/docs/integrations/text_embedding/cohere)
-
-### Reranker
-
-Usage of the Cohere [Reranker](/docs/integrations/retrievers/cohere-reranker)
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/college_confidential.mdx b/langchain_md_files/integrations/providers/college_confidential.mdx
deleted file mode 100644
index 4f081945b944b1842f97a003a844d8d2447fe677..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/college_confidential.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# College Confidential
-
->[College Confidential](https://www.collegeconfidential.com/) gives information on 3,800+ colleges and universities.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/college_confidential).
-
-```python
-from langchain_community.document_loaders import CollegeConfidentialLoader
-```
diff --git a/langchain_md_files/integrations/providers/confident.mdx b/langchain_md_files/integrations/providers/confident.mdx
deleted file mode 100644
index 51de57342146b41279ae602f18929851258ce592..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/confident.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
-# Confident AI
-
->[Confident AI](https://confident-ai.com) is the creator of `DeepEval`.
->
->[DeepEval](https://github.com/confident-ai/deepeval) is a package for unit testing LLMs.
-> Using `DeepEval`, everyone can build robust language models through faster iterations
-> using both unit testing and integration testing. `DeepEval` provides support for each step in the iteration
-> from synthetic data creation to testing.
-
-## Installation and Setup
-
-You need to get the [DeepEval API credentials](https://app.confident-ai.com).
-
-You need to install the `DeepEval` Python package:
-
-```bash
-pip install deepeval
-```
-
-## Callbacks
-
-See an [example](/docs/integrations/callbacks/confident).
-
-```python
-from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/confluence.mdx b/langchain_md_files/integrations/providers/confluence.mdx
deleted file mode 100644
index 27a7e274a21ef98ab8b52e60c088b850245e77c1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/confluence.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Confluence
-
->[Confluence](https://www.atlassian.com/software/confluence) is a wiki collaboration platform that saves and organizes all of the project-related material. `Confluence` is a knowledge base that primarily handles content management activities. 
-
-
-## Installation and Setup
-
-```bash
-pip install atlassian-python-api
-```
-
-We need to set up `username/api_key` or `Oauth2 login`. 
-See [instructions](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/confluence).
-
-```python
-from langchain_community.document_loaders import ConfluenceLoader
-```
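-
-A minimal loading sketch, assuming a Confluence Cloud instance with username/API key authentication; the URL, credentials, and space key are placeholders (depending on the installed version, `space_key` may instead be passed to `load()`):
-
-```python
-from langchain_community.document_loaders import ConfluenceLoader
-
-loader = ConfluenceLoader(
-    url="https://yoursite.atlassian.net/wiki",  # placeholder
-    username="me@example.com",                  # placeholder
-    api_key="<your_api_token>",                 # placeholder
-    space_key="SPACE",                          # placeholder
-)
-docs = loader.load()
-```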
diff --git a/langchain_md_files/integrations/providers/connery.mdx b/langchain_md_files/integrations/providers/connery.mdx
deleted file mode 100644
index 36684a97fa0e9068b886f5e36f76e1677f3a4a27..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/connery.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
-# Connery
-
->[Connery SDK](https://github.com/connery-io/connery-sdk) is an NPM package that 
-> includes both an SDK and a CLI, designed for the development of plugins and actions.
->
->The CLI automates many things in the development process. The SDK 
-> offers a JavaScript API for defining plugins and actions and packaging them 
-> into a plugin server with a standardized REST API generated from the metadata. 
-> The plugin server handles authorization, input validation, and logging. 
-> So you can focus on the logic of your actions.
-> 
-> See the use cases and examples in the [Connery SDK documentation](https://sdk.connery.io/docs/use-cases/).
-
-## Toolkit
-
-See [usage example](/docs/integrations/tools/connery).
-
-```python
-from langchain_community.agent_toolkits.connery import ConneryToolkit
-```
-
-## Tools
-
-### ConneryAction
-
-```python
-from langchain_community.tools.connery import ConneryService
-```
diff --git a/langchain_md_files/integrations/providers/context.mdx b/langchain_md_files/integrations/providers/context.mdx
deleted file mode 100644
index 0b2e46c21ddfb24d79b817bf8cf10b80159c7d54..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/context.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Context
-
->[Context](https://context.ai/) provides user analytics for LLM-powered products and features.
-
-## Installation and Setup
-
-We need to install the `context-python` Python package:
-
-```bash
-pip install context-python
-```
-
-
-## Callbacks
-
-See a [usage example](/docs/integrations/callbacks/context).
-
-```python
-from langchain.callbacks import ContextCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/couchbase.mdx b/langchain_md_files/integrations/providers/couchbase.mdx
deleted file mode 100644
index 35ef6748069671e2c3b941e61f0a93f67edd08f7..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/couchbase.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
-# Couchbase
-
->[Couchbase](http://couchbase.com/) is an award-winning distributed NoSQL cloud database 
-> that delivers unmatched versatility, performance, scalability, and financial value 
-> for all of your cloud, mobile, AI, and edge computing applications.
-
-## Installation and Setup
-
-We have to install the `langchain-couchbase` package.
-
-```bash
-pip install langchain-couchbase
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/couchbase).
-
-```python
-from langchain_couchbase import CouchbaseVectorStore
-```
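-
-A minimal construction sketch; it assumes an existing Couchbase `cluster` connection object, a pre-created Search index, and any LangChain `embeddings` object, and the bucket/scope/collection/index names are placeholders:
-
-```python
-from langchain_couchbase import CouchbaseVectorStore
-
-vector_store = CouchbaseVectorStore(
-    cluster=cluster,                  # couchbase Cluster connection (assumed)
-    bucket_name="my_bucket",          # placeholder
-    scope_name="my_scope",            # placeholder
-    collection_name="my_collection",  # placeholder
-    embedding=embeddings,             # embeddings object (assumed)
-    index_name="my_search_index",     # placeholder Search index
-)
-```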
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/couchbase).
-
-```python
-from langchain_community.document_loaders.couchbase import CouchbaseLoader
-```
-
-## LLM Caches
-
-### CouchbaseCache
-Use Couchbase as a cache for prompts and responses.
-
-See a [usage example](/docs/integrations/llm_caching/#couchbase-caches).
-
-To import this cache:
-```python
-from langchain_couchbase.cache import CouchbaseCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain_core.globals import set_llm_cache
-
-cluster = couchbase_cluster_connection_object
-
-set_llm_cache(
-    CouchbaseCache(
-        cluster=cluster,
-        bucket_name=BUCKET_NAME,
-        scope_name=SCOPE_NAME,
-        collection_name=COLLECTION_NAME,
-    )
-)
-```
-
-
-### CouchbaseSemanticCache
-Semantic caching allows users to retrieve cached prompts based on the semantic similarity between the user input and previously cached inputs. Under the hood it uses Couchbase as both a cache and a vectorstore.
-The CouchbaseSemanticCache needs a Search Index defined to work. Please look at the [usage example](/docs/integrations/vectorstores/couchbase) on how to set up the index.
-
-See a [usage example](/docs/integrations/llm_caching/#couchbase-caches).
-
-To import this cache:
-```python
-from langchain_couchbase.cache import CouchbaseSemanticCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain_core.globals import set_llm_cache
-
-# use any embedding provider...
-from langchain_openai import OpenAIEmbeddings
-
-embeddings = OpenAIEmbeddings()
-cluster = couchbase_cluster_connection_object
-
-set_llm_cache(
-    CouchbaseSemanticCache(
-        cluster=cluster,
-        embedding = embeddings,
-        bucket_name=BUCKET_NAME,
-        scope_name=SCOPE_NAME,
-        collection_name=COLLECTION_NAME,
-        index_name=INDEX_NAME,
-    )
-)
-```
-
-## Chat Message History
-Use Couchbase as the storage for your chat messages.
-
-See a [usage example](/docs/integrations/memory/couchbase_chat_message_history).
-
-To use the chat message history in your applications:
-```python
-from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory
-
-message_history = CouchbaseChatMessageHistory(
-    cluster=cluster,
-    bucket_name=BUCKET_NAME,
-    scope_name=SCOPE_NAME,
-    collection_name=COLLECTION_NAME,
-    session_id="test-session",
-)
-
-message_history.add_user_message("hi!")
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/coze.mdx b/langchain_md_files/integrations/providers/coze.mdx
deleted file mode 100644
index ce1d0ce456b3506e4517ba82403ac2462ba0ca85..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/coze.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Coze
-
-[Coze](https://www.coze.com/) is an AI chatbot development platform that enables
-the creation and deployment of chatbots for handling diverse conversations across
-various applications.
-
-
-## Installation and Setup
-
-First, you need to get the `API_KEY` from the [Coze](https://www.coze.com/) website.
-
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/coze/).
-
-```python
-from langchain_community.chat_models import ChatCoze
-```
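-
-A minimal usage sketch; the parameter names follow the community integration and the values are placeholders (check the usage example above for the exact API):
-
-```python
-from langchain_community.chat_models import ChatCoze
-
-chat = ChatCoze(
-    coze_api_key="<your_api_key>",  # placeholder
-    bot_id="<your_bot_id>",         # placeholder
-    user="<user_id>",               # placeholder
-)
-chat.invoke("What can you do?")
-```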
diff --git a/langchain_md_files/integrations/providers/cratedb.mdx b/langchain_md_files/integrations/providers/cratedb.mdx
deleted file mode 100644
index d367eb3e945425875e94c0b17f372b043afa4e30..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cratedb.mdx
+++ /dev/null
@@ -1,197 +0,0 @@
-# CrateDB
-
-> [CrateDB] is a distributed and scalable SQL database for storing and
-> analyzing massive amounts of data in near real-time, even with complex
-> queries. It is PostgreSQL-compatible, based on Lucene, and inherits
-> from Elasticsearch.
-
-
-## Installation and Setup
-
-### Setup CrateDB
-There are two ways to get started with CrateDB quickly. Alternatively,
-choose other [CrateDB installation options].
-
-#### Start CrateDB on your local machine
-Example: Run a single-node CrateDB instance with security disabled,
-using Docker or Podman. This is not recommended for production use.
-
-```bash
-docker run --name=cratedb --rm \
-  --publish=4200:4200 --publish=5432:5432 --env=CRATE_HEAP_SIZE=2g \
-  crate:latest -Cdiscovery.type=single-node
-```
-
-#### Deploy cluster on CrateDB Cloud
-[CrateDB Cloud] is a managed CrateDB service. Sign up for a
-[free trial][CrateDB Cloud Console].
-
-### Install Client
-Install the most recent version of the [langchain-cratedb] package
-and a few others that are needed for this tutorial.
-```bash
-pip install --upgrade langchain-cratedb langchain-openai unstructured
-```
-
-
-## Documentation
-For a more detailed walkthrough of the CrateDB wrapper, see
-[using LangChain with CrateDB]. See also [all features of CrateDB]
-to learn about other functionality provided by CrateDB.
-
-
-## Features
-The CrateDB adapter for LangChain provides APIs to use CrateDB as vector store,
-document loader, and storage for chat messages.
-
-### Vector Store
-Use the CrateDB vector store functionality around `FLOAT_VECTOR` and `KNN_MATCH`
-for similarity search and other purposes. See also [CrateDBVectorStore Tutorial].
-
-Make sure you've configured a valid OpenAI API key.
-```bash
-export OPENAI_API_KEY=sk-XJZ...
-```
-```python
-from langchain_community.document_loaders import UnstructuredURLLoader
-from langchain_cratedb import CrateDBVectorStore
-from langchain_openai import OpenAIEmbeddings
-from langchain.text_splitter import CharacterTextSplitter
-
-loader = UnstructuredURLLoader(urls=["https://github.com/langchain-ai/langchain/raw/refs/tags/langchain-core==0.3.28/docs/docs/how_to/state_of_the_union.txt"])
-documents = loader.load()
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-docs = text_splitter.split_documents(documents)
-
-embeddings = OpenAIEmbeddings()
-
-# Connect to a self-managed CrateDB instance on localhost.
-CONNECTION_STRING = "crate://?schema=testdrive"
-
-store = CrateDBVectorStore.from_documents(
-    documents=docs,
-    embedding=embeddings,
-    collection_name="state_of_the_union",
-    connection=CONNECTION_STRING,
-)
-
-query = "What did the president say about Ketanji Brown Jackson"
-docs_with_score = store.similarity_search_with_score(query)
-```
-
-### Document Loader
-Load documents from a CrateDB database table, using the document loader
-`CrateDBLoader`, which is based on SQLAlchemy. See also [CrateDBLoader Tutorial].
-
-To use the document loader in your applications:
-```python
-import sqlalchemy as sa
-from langchain_community.utilities import SQLDatabase
-from langchain_cratedb import CrateDBLoader
-
-# Connect to a self-managed CrateDB instance on localhost.
-CONNECTION_STRING = "crate://?schema=testdrive"
-
-db = SQLDatabase(engine=sa.create_engine(CONNECTION_STRING))
-
-loader = CrateDBLoader(
-    'SELECT * FROM sys.summits LIMIT 42',
-    db=db,
-)
-documents = loader.load()
-```
-
-### Chat Message History
-Use CrateDB as the storage for your chat messages.
-See also [CrateDBChatMessageHistory Tutorial].
-
-To use the chat message history in your applications:
-```python
-from langchain_cratedb import CrateDBChatMessageHistory
-
-# Connect to a self-managed CrateDB instance on localhost.
-CONNECTION_STRING = "crate://?schema=testdrive"
-
-message_history = CrateDBChatMessageHistory(
-    session_id="test-session",
-    connection=CONNECTION_STRING,
-)
-
-message_history.add_user_message("hi!")
-```
-
-### Full Cache
-The standard / full cache avoids invoking the LLM when the supplied
-prompt is exactly the same as one encountered already.
-See also [CrateDBCache Example].
-
-To use the full cache in your applications:
-```python
-import sqlalchemy as sa
-from langchain.globals import set_llm_cache
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-from langchain_cratedb import CrateDBCache
-
-# Configure cache.
-engine = sa.create_engine("crate://crate@localhost:4200/?schema=testdrive")
-set_llm_cache(CrateDBCache(engine))
-
-# Invoke LLM conversation.
-llm = ChatOpenAI(
-    model_name="chatgpt-4o-latest",
-    temperature=0.7,
-)
-print()
-print("Asking with full cache:")
-answer = llm.invoke("What is the answer to everything?")
-print(answer.content)
-```
-
-### Semantic Cache
-
-The semantic cache allows users to retrieve cached prompts based on semantic
-similarity between the user input and previously cached inputs. It also avoids
-invoking the LLM when not needed.
-See also [CrateDBSemanticCache Example].
-
-To use the semantic cache in your applications:
-```python
-import sqlalchemy as sa
-from langchain.globals import set_llm_cache
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-from langchain_cratedb import CrateDBSemanticCache
-
-# Configure embeddings.
-embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
-
-# Configure cache.
-engine = sa.create_engine("crate://crate@localhost:4200/?schema=testdrive")
-set_llm_cache(
-    CrateDBSemanticCache(
-        embedding=embeddings,
-        connection=engine,
-        search_threshold=1.0,
-    )
-)
-
-# Invoke LLM conversation.
-llm = ChatOpenAI(model_name="chatgpt-4o-latest")
-print()
-print("Asking with semantic cache:")
-answer = llm.invoke("What is the answer to everything?")
-print(answer.content)
-```
-
-
-[all features of CrateDB]: https://cratedb.com/docs/guide/feature/
-[CrateDB]: https://cratedb.com/database
-[CrateDB Cloud]: https://cratedb.com/database/cloud
-[CrateDB Cloud Console]: https://console.cratedb.cloud/?utm_source=langchain&utm_content=documentation
-[CrateDB installation options]: https://cratedb.com/docs/guide/install/
-[CrateDBCache Example]: https://github.com/crate/langchain-cratedb/blob/main/examples/basic/cache.py
-[CrateDBSemanticCache Example]: https://github.com/crate/langchain-cratedb/blob/main/examples/basic/cache.py
-[CrateDBChatMessageHistory Tutorial]: https://github.com/crate/cratedb-examples/blob/main/topic/machine-learning/llm-langchain/conversational_memory.ipynb
-[CrateDBLoader Tutorial]: https://github.com/crate/cratedb-examples/blob/main/topic/machine-learning/llm-langchain/document_loader.ipynb
-[CrateDBVectorStore Tutorial]: https://github.com/crate/cratedb-examples/blob/main/topic/machine-learning/llm-langchain/vector_search.ipynb
-[langchain-cratedb]: https://pypi.org/project/langchain-cratedb/
-[using LangChain with CrateDB]: https://cratedb.com/docs/guide/integrate/langchain/
diff --git a/langchain_md_files/integrations/providers/ctransformers.mdx b/langchain_md_files/integrations/providers/ctransformers.mdx
deleted file mode 100644
index 09414a8fe7d4412cc169ad79b1a7113f08e911dc..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ctransformers.mdx
+++ /dev/null
@@ -1,57 +0,0 @@
-# C Transformers
-
-This page covers how to use the [C Transformers](https://github.com/marella/ctransformers) library within LangChain.
-It is broken into two parts: installation and setup, and then references to specific C Transformers wrappers.
-
-## Installation and Setup
-
-- Install the Python package with `pip install ctransformers`
-- Download a supported [GGML model](https://huggingface.co/TheBloke) (see [Supported Models](https://github.com/marella/ctransformers#supported-models))
-
-## Wrappers
-
-### LLM
-
-There exists a CTransformers LLM wrapper, which you can access with:
-
-```python
-from langchain_community.llms import CTransformers
-```
-
-It provides a unified interface for all models:
-
-```python
-llm = CTransformers(model='/path/to/ggml-gpt-2.bin', model_type='gpt2')
-
-print(llm.invoke('AI is going to'))
-```
-
-If you are getting an `illegal instruction` error, try using `lib='avx'` or `lib='basic'`:
-
-```py
-llm = CTransformers(model='/path/to/ggml-gpt-2.bin', model_type='gpt2', lib='avx')
-```
-
-It can be used with models hosted on the Hugging Face Hub:
-
-```py
-llm = CTransformers(model='marella/gpt-2-ggml')
-```
-
-If a model repo has multiple model files (`.bin` files), specify a model file using:
-
-```py
-llm = CTransformers(model='marella/gpt-2-ggml', model_file='ggml-model.bin')
-```
-
-Additional parameters can be passed using the `config` parameter:
-
-```py
-config = {'max_new_tokens': 256, 'repetition_penalty': 1.1}
-
-llm = CTransformers(model='marella/gpt-2-ggml', config=config)
-```
-
-See [Documentation](https://github.com/marella/ctransformers#config) for a list of available parameters.
-
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/ctransformers).
diff --git a/langchain_md_files/integrations/providers/ctranslate2.mdx b/langchain_md_files/integrations/providers/ctranslate2.mdx
deleted file mode 100644
index 0e3c3a9319e4d281decfbbc7a1c8e006ae5d37d1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ctranslate2.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# CTranslate2
-
->[CTranslate2](https://opennmt.net/CTranslate2/quickstart.html) is a C++ and Python library 
-> for efficient inference with Transformer models.
->
->The project implements a custom runtime that applies many performance optimization
-> techniques such as weights quantization, layers fusion, batch reordering, etc., 
-> to accelerate and reduce the memory usage of Transformer models on CPU and GPU.
->
->A full list of features and supported models is included in the 
-> [project’s repository](https://opennmt.net/CTranslate2/guides/transformers.html). 
-> To start, please check out the official [quickstart guide](https://opennmt.net/CTranslate2/quickstart.html).
-
-
-## Installation and Setup
-
-Install the Python package:
-
-```bash
-pip install ctranslate2
-```
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/ctranslate2).
-
-```python
-from langchain_community.llms import CTranslate2
-```
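-
-A minimal sketch of instantiating the wrapper (the model directory and tokenizer name below are placeholders; a model must first be converted with CTranslate2's converter, e.g. `ct2-transformers-converter`):
-
-```python
-from langchain_community.llms import CTranslate2
-
-# Placeholder paths: point these at your converted CTranslate2 model
-# and the matching Hugging Face tokenizer.
-llm = CTranslate2(
-    model_path="./llama-2-7b-ct2",
-    tokenizer_name="meta-llama/Llama-2-7b-hf",
-)
-print(llm.invoke("What is the capital of France?"))
-```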
diff --git a/langchain_md_files/integrations/providers/cube.mdx b/langchain_md_files/integrations/providers/cube.mdx
deleted file mode 100644
index 9393bc36aa28c58616c8f5e5aa0b05f1b1dd9ffd..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/cube.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Cube
-
->[Cube](https://cube.dev/) is the Semantic Layer for building data apps. It helps 
-> data engineers and application developers access data from modern data stores, 
-> organize it into consistent definitions, and deliver it to every application.
-
-## Installation and Setup
-
-We have to get the API key and the URL of the Cube instance. See 
-[these instructions](https://cube.dev/docs/product/apis-integrations/rest-api#configuration-base-path).
-
-
-## Document loader
-
-### Cube Semantic Layer
-
-See a [usage example](/docs/integrations/document_loaders/cube_semantic).
-
-```python
-from langchain_community.document_loaders import CubeSemanticLoader
-```
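-
-As a minimal sketch, the loader is pointed at the Cube REST API `meta` endpoint and given an API token (the URL and token below are placeholders; see the linked usage example for how to generate the token):
-
-```python
-from langchain_community.document_loaders import CubeSemanticLoader
-
-# Placeholder deployment URL and token; replace with your own values.
-loader = CubeSemanticLoader(
-    "https://<deployment>.cubecloud.dev/cubejs-api/v1/meta",
-    "<cube-api-token>",
-)
-documents = loader.load()
-```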
diff --git a/langchain_md_files/integrations/providers/dappier.mdx b/langchain_md_files/integrations/providers/dappier.mdx
deleted file mode 100644
index 2a1235984f1064efcd1cecbe19e733eb1ac782eb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dappier.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
-# Dappier
-
-[Dappier](https://dappier.com) connects any LLM or your Agentic AI to
-real-time, rights-cleared, proprietary data from trusted sources,
-making your AI an expert in anything. Our specialized models include
-Real-Time Web Search, News, Sports, Financial Stock Market Data,
-Crypto Data, and exclusive content from premium publishers. Explore a
-wide range of data models in our marketplace at
-[marketplace.dappier.com](https://marketplace.dappier.com).
-
-[Dappier](https://dappier.com) delivers enriched, prompt-ready, and
-contextually relevant data strings, optimized for seamless integration
-with LangChain. Whether you're building conversational AI, recommendation
-engines, or intelligent search, Dappier's LLM-agnostic RAG models ensure
-your AI has access to verified, up-to-date data—without the complexity of
-building and managing your own retrieval pipeline.
-
-## Installation and Setup
-
-Install ``langchain-dappier`` and set environment variable
-``DAPPIER_API_KEY``.
-
-```bash
-pip install -U langchain-dappier
-export DAPPIER_API_KEY="your-api-key"
-```
-
-We also need to set our Dappier API credentials, which can be generated at
-the [Dappier site](https://platform.dappier.com/profile/api-keys).
-
-We can find the supported data models by heading over to the 
-[Dappier marketplace](https://platform.dappier.com/marketplace).
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/dappier).
-
-```python
-from langchain_community.chat_models import ChatDappierAI
-```
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/dappier).
-
-```python
-from langchain_dappier import DappierRetriever
-```
-
-## Tool
-
-See a [usage example](/docs/integrations/tools/dappier).
-
-```python
-from langchain_dappier import (
-    DappierRealTimeSearchTool,
-    DappierAIRecommendationTool
-)
-```
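-
-A minimal sketch of calling the real-time search tool (the query is illustrative, `DAPPIER_API_KEY` must be set, and the exact argument schema is described in the tool docs linked above):
-
-```python
-from langchain_dappier import DappierRealTimeSearchTool
-
-# Requires the DAPPIER_API_KEY environment variable.
-tool = DappierRealTimeSearchTool()
-result = tool.invoke({"query": "What is the latest news about AI?"})
-print(result)
-```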
diff --git a/langchain_md_files/integrations/providers/dashvector.mdx b/langchain_md_files/integrations/providers/dashvector.mdx
deleted file mode 100644
index b7ded751ddf7dbf506b0f9db7d2b7f61e861e3e1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dashvector.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-# DashVector
-
-> [DashVector](https://help.aliyun.com/document_detail/2510225.html) is a fully-managed vectorDB service that supports high-dimension dense and sparse vectors, real-time insertion and filtered search. It is built to scale automatically and can adapt to different application requirements.  
-
-This document demonstrates how to leverage DashVector within the LangChain ecosystem. In particular, it shows how to install DashVector, and how to use it as a VectorStore plugin in LangChain.
-It is broken into two parts: installation and setup, and then references to specific DashVector wrappers.
-
-## Installation and Setup
-
-
-Install the Python SDK:
-
-```bash
-pip install dashvector
-```
-
-You must have an API key. Here are the [installation instructions](https://help.aliyun.com/document_detail/2510223.html).
-
-
-## Embedding models
-
-```python
-from langchain_community.embeddings import DashScopeEmbeddings
-```
-
-See a [usage example](/docs/integrations/vectorstores/dashvector).
-
-
-## Vector Store
-
-A DashVector Collection is wrapped as a familiar VectorStore for native usage within LangChain, 
-which allows it to be readily used for various scenarios, such as semantic search or example selection.
-
-You may import the vectorstore by:
-```python
-from langchain_community.vectorstores import DashVector
-```
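-
-A minimal sketch, assuming `DASHVECTOR_API_KEY` (and `DASHSCOPE_API_KEY` for the embeddings) are set; the texts and query are illustrative:
-
-```python
-from langchain_community.embeddings import DashScopeEmbeddings
-from langchain_community.vectorstores import DashVector
-
-# Build a collection from a few texts and run a similarity search.
-vectorstore = DashVector.from_texts(
-    ["DashVector is a vector database", "LangChain integrates with DashVector"],
-    embedding=DashScopeEmbeddings(),
-)
-docs = vectorstore.similarity_search("vector database")
-```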
-
-For a detailed walkthrough of the DashVector wrapper, please refer to [this notebook](/docs/integrations/vectorstores/dashvector)
diff --git a/langchain_md_files/integrations/providers/datadog.mdx b/langchain_md_files/integrations/providers/datadog.mdx
deleted file mode 100644
index b854c668759bf46d97ddc4f7fb0143119f3f67b9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/datadog.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
-# Datadog Tracing
-
->[ddtrace](https://github.com/DataDog/dd-trace-py) is a Datadog application performance monitoring (APM) library which provides an integration to monitor your LangChain application.
-
-Key features of the ddtrace integration for LangChain:
-- Traces: Capture LangChain requests, parameters, prompt-completions, and help visualize LangChain operations.
-- Metrics: Capture LangChain request latency, errors, and token/cost usage (for OpenAI LLMs and chat models).
-- Logs: Store prompt completion data for each LangChain operation.
-- Dashboard: Combine metrics, logs, and trace data into a single plane to monitor LangChain requests.
-- Monitors: Provide alerts in response to spikes in LangChain request latency or error rate.
-
-Note: The ddtrace LangChain integration currently provides tracing for LLMs, chat models, Text Embedding Models, Chains, and Vectorstores.
-
-## Installation and Setup
-
-1. Enable APM and StatsD in your Datadog Agent, along with a Datadog API key. For example, in Docker:
-
-```
-docker run -d --cgroupns host \
-              --pid host \
-              -v /var/run/docker.sock:/var/run/docker.sock:ro \
-              -v /proc/:/host/proc/:ro \
-              -v /sys/fs/cgroup/:/host/sys/fs/cgroup:ro \
-              -e DD_API_KEY=<DATADOG_API_KEY> \
-              -p 127.0.0.1:8126:8126/tcp \
-              -p 127.0.0.1:8125:8125/udp \
-              -e DD_DOGSTATSD_NON_LOCAL_TRAFFIC=true \
-              -e DD_APM_ENABLED=true \
-              gcr.io/datadoghq/agent:latest
-```
-
-2. Install the Datadog APM Python library.
-
-```
-pip install ddtrace>=1.17
-```
-
-
-3. The LangChain integration can be enabled automatically when you prefix your LangChain Python application command with `ddtrace-run`:
-
-```
-DD_SERVICE="my-service" DD_ENV="staging" DD_API_KEY=<DATADOG_API_KEY> ddtrace-run python <your-app>.py
-```
-
-**Note**: If the Agent is using a non-default hostname or port, be sure to also set `DD_AGENT_HOST`, `DD_TRACE_AGENT_PORT`, or `DD_DOGSTATSD_PORT`.
-
-Additionally, the LangChain integration can be enabled programmatically by adding `patch_all()` or `patch(langchain=True)` before the first import of `langchain` in your application.
-
-Note that using `ddtrace-run` or `patch_all()` will also enable the `requests` and `aiohttp` integrations which trace HTTP requests to LLM providers, as well as the `openai` integration which traces requests to the OpenAI library.
-
-```python
-from ddtrace import config, patch
-
-# Note: be sure to configure the integration before calling ``patch()``!
-# e.g. config.langchain["logs_enabled"] = True
-
-patch(langchain=True)
-
-# to trace synchronous HTTP requests
-# patch(langchain=True, requests=True)
-
-# to trace asynchronous HTTP requests (to the OpenAI library)
-# patch(langchain=True, aiohttp=True)
-
-# to include underlying OpenAI spans from the OpenAI integration
-# patch(langchain=True, openai=True)
-```
-
-See the [APM Python library documentation](https://ddtrace.readthedocs.io/en/stable/installation_quickstart.html) for more advanced usage.
-
-
-## Configuration
-
-See the [APM Python library documentation](https://ddtrace.readthedocs.io/en/stable/integrations.html#langchain) for all the available configuration options.
-
-
-### Log Prompt & Completion Sampling
-
-To enable log prompt and completion sampling, set the `DD_LANGCHAIN_LOGS_ENABLED=1` environment variable. By default, 10% of traced requests will emit logs containing the prompts and completions.
-
-To adjust the log sample rate, see the [APM library documentation](https://ddtrace.readthedocs.io/en/stable/integrations.html#langchain).
-
-**Note**: Logs submission requires `DD_API_KEY` to be specified when running `ddtrace-run`.
-
-
-## Troubleshooting
-
-Need help? Create an issue on [ddtrace](https://github.com/DataDog/dd-trace-py) or contact [Datadog support](https://docs.datadoghq.com/help/).
diff --git a/langchain_md_files/integrations/providers/datadog_logs.mdx b/langchain_md_files/integrations/providers/datadog_logs.mdx
deleted file mode 100644
index eb365eed922cf2d7f1d31bf2ee246a9de95a7e3c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/datadog_logs.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Datadog Logs
-
->[Datadog](https://www.datadoghq.com/) is a monitoring and analytics platform for cloud-scale applications.
-
-## Installation and Setup
-
-```bash
-pip install datadog_api_client
-```
-
-Initialize the loader with your Datadog API key and APP key, and set up a query to extract the desired logs.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/datadog_logs).
-
-```python
-from langchain_community.document_loaders import DatadogLogsLoader
-```
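-
-A minimal sketch of initializing the loader (the query, credentials, and `limit` below are illustrative; adjust them to your Datadog account and log filters):
-
-```python
-from langchain_community.document_loaders import DatadogLogsLoader
-
-# Placeholder query and credentials; each matching log becomes a Document.
-loader = DatadogLogsLoader(
-    query="service:agent status:error",
-    api_key="<DD_API_KEY>",
-    app_key="<DD_APP_KEY>",
-    limit=100,
-)
-documents = loader.load()
-```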
diff --git a/langchain_md_files/integrations/providers/dataforseo.mdx b/langchain_md_files/integrations/providers/dataforseo.mdx
deleted file mode 100644
index 37d8884fa4b42b9cd3b0064078db99967ec49d80..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dataforseo.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
-# DataForSEO
-
->[DataForSeo](https://dataforseo.com/) provides comprehensive SEO and digital marketing data solutions via API.
-
-This page provides instructions on how to use the DataForSEO search APIs within LangChain.
-
-## Installation and Setup
-
-Get a [DataForSEO API Access login and password](https://app.dataforseo.com/register), and set them as environment variables 
-(`DATAFORSEO_LOGIN` and `DATAFORSEO_PASSWORD` respectively).
-
-```python
-import os
-
-os.environ["DATAFORSEO_LOGIN"] = "your_login"
-os.environ["DATAFORSEO_PASSWORD"] = "your_password"
-```
-
-
-## Utility
-
-The `DataForSEO` utility wraps the API. To import this utility, use:
-
-```python
-from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
-```
-
-For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo).
-
-## Tool
-
-You can also load this wrapper as a Tool to use with an Agent:
-
-```python
-from langchain.agents import load_tools
-tools = load_tools(["dataforseo-api-search"])
-```
-
-This will load the following tools:
-
-```python
-from langchain_community.tools import DataForSeoAPISearchRun
-from langchain_community.tools import DataForSeoAPISearchResults
-```
-
-## Example usage
-
-```python
-dataforseo = DataForSeoAPIWrapper(api_login="your_login", api_password="your_password")
-result = dataforseo.run("Bill Gates")
-print(result)
-```
diff --git a/langchain_md_files/integrations/providers/dataherald.mdx b/langchain_md_files/integrations/providers/dataherald.mdx
deleted file mode 100644
index 2b1c276df526b4d7d5f01ec8f88735b021dea832..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dataherald.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
-# Dataherald
-
->[Dataherald](https://www.dataherald.com) is a natural language-to-SQL platform.
-
-This page covers how to use the `Dataherald API` within LangChain.
-
-## Installation and Setup
-- Install requirements with 
-```bash
-pip install dataherald
-```
-- Go to Dataherald and sign up [here](https://www.dataherald.com)
-- Create an app and get your `API KEY`
-- Set your `API KEY` as an environment variable `DATAHERALD_API_KEY`
-
-
-## Wrappers
-
-### Utility
-
-There exists a DataheraldAPIWrapper utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities.dataherald import DataheraldAPIWrapper
-```
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataherald).
-
-### Tool
-
-You can use the tool in an agent like this:
-```python
-from langchain_community.utilities.dataherald import DataheraldAPIWrapper
-from langchain_community.tools.dataherald.tool import DataheraldTextToSQL
-from langchain_openai import ChatOpenAI
-from langchain import hub
-from langchain.agents import AgentExecutor, create_react_agent, load_tools
-
-api_wrapper = DataheraldAPIWrapper(db_connection_id="<db_connection_id>")
-tool = DataheraldTextToSQL(api_wrapper=api_wrapper)
-tools = [tool]
-llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
-prompt = hub.pull("hwchase17/react")
-agent = create_react_agent(llm, tools, prompt)
-agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
-agent_executor.invoke({"input":"Return the sql for this question: How many employees are in the company?"})
-```
-
-Output
-```shell
-> Entering new AgentExecutor chain...
-I need to use a tool that can convert this question into SQL.
-Action: dataherald
-Action Input: How many employees are in the company?Answer: SELECT
-    COUNT(*) FROM employeesI now know the final answer
-Final Answer: SELECT
-    COUNT(*)
-FROM
-    employees
-
-> Finished chain.
-{'input': 'Return the sql for this question: How many employees are in the company?', 'output': "SELECT \n    COUNT(*)\nFROM \n    employees"}
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/dedoc.mdx b/langchain_md_files/integrations/providers/dedoc.mdx
deleted file mode 100644
index 3f2aaa206e325e127e555f3a2fb6ca64e951296d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dedoc.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
-# Dedoc
-
->[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)
-library/service that extracts texts, tables, attached files and document structure
-(e.g., titles, list items, etc.) from files of various formats.
-
-`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.
-The full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).
-
-## Installation and Setup
-
-### Dedoc library
-
-You can install `Dedoc` using `pip`.
-In this case, you will also need to install its dependencies;
-please see [the installation guide](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)
-for more information.
-
-```bash
-pip install dedoc
-```
-
-### Dedoc API
-
-If you are going to use the `Dedoc` API, you don't need to install the `dedoc` library.
-Instead, run the `Dedoc` service, e.g. as a `Docker` container (please see
-[the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker)
-for more details):
-
-```bash
-docker pull dedocproject/dedoc
-docker run -p 1231:1231 dedocproject/dedoc
-```
-
-## Document Loader
-
-* For handling files of any formats (supported by `Dedoc`), you can use `DedocFileLoader`:
-
-    ```python
-    from langchain_community.document_loaders import DedocFileLoader
-    ```
-
-* For handling PDF files (with or without a textual layer), you can use `DedocPDFLoader`:
-
-    ```python
-    from langchain_community.document_loaders import DedocPDFLoader
-    ```
-
-* For handling files of any formats without library installation,
-you can use `Dedoc API` with `DedocAPIFileLoader`:
-
-    ```python
-    from langchain_community.document_loaders import DedocAPIFileLoader
-    ```
-
-Please see a [usage example](/docs/integrations/document_loaders/dedoc) for more details.
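-
-As a minimal sketch, loading a local file with `DedocFileLoader` might look like this (the file path and the `split` mode are illustrative):
-
-```python
-from langchain_community.document_loaders import DedocFileLoader
-
-# Illustrative path; split="page" asks Dedoc to return one document per page.
-loader = DedocFileLoader("./example.pdf", split="page")
-docs = loader.load()
-```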
diff --git a/langchain_md_files/integrations/providers/deepinfra.mdx b/langchain_md_files/integrations/providers/deepinfra.mdx
deleted file mode 100644
index 5eb2b1b38770e4170f87ad72f19f030811752fba..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/deepinfra.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-# DeepInfra
-
->[DeepInfra](https://deepinfra.com/docs) allows us to run the 
-> [latest machine learning models](https://deepinfra.com/models) with ease. 
-> DeepInfra takes care of all the heavy lifting related to running, scaling and monitoring 
-> the models. You can focus on your application and integrate the models with simple REST API calls.
-
->DeepInfra provides [examples](https://deepinfra.com/docs/advanced/langchain) of integration with LangChain.
-
-This page covers how to use the `DeepInfra` ecosystem within `LangChain`.
-It is broken into two parts: installation and setup, and then references to specific DeepInfra wrappers.
-
-## Installation and Setup
-
-- Get your DeepInfra API key from [deepinfra.com](https://deepinfra.com/).
-- Set it as an environment variable (`DEEPINFRA_API_TOKEN`).
-
-## Available Models
-
-DeepInfra provides a range of Open Source LLMs ready for deployment.
-
-You can see supported models for
-[text-generation](https://deepinfra.com/models?type=text-generation) and
-[embeddings](https://deepinfra.com/models?type=embeddings).
-
-You can view a [list of request and response parameters](https://deepinfra.com/meta-llama/Llama-2-70b-chat-hf/api).
-
-Chat models [follow the OpenAI API](https://deepinfra.com/meta-llama/Llama-2-70b-chat-hf/api?example=openai-http).
-
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/deepinfra).
-
-```python
-from langchain_community.llms import DeepInfra
-```
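-
-A minimal sketch (the `model_id` is an example from the text-generation list; `DEEPINFRA_API_TOKEN` must be set):
-
-```python
-from langchain_community.llms import DeepInfra
-
-# Example model id; requires the DEEPINFRA_API_TOKEN environment variable.
-llm = DeepInfra(model_id="meta-llama/Llama-2-70b-chat-hf")
-print(llm.invoke("Who developed the theory of general relativity?"))
-```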
-
-## Embeddings
-
-See a [usage example](/docs/integrations/text_embedding/deepinfra).
-
-```python
-from langchain_community.embeddings import DeepInfraEmbeddings
-```
-
-## Chat Models
-
-See a [usage example](/docs/integrations/chat/deepinfra).
-
-```python
-from langchain_community.chat_models import ChatDeepInfra
-```
diff --git a/langchain_md_files/integrations/providers/deeplake.mdx b/langchain_md_files/integrations/providers/deeplake.mdx
deleted file mode 100644
index 4aaea7861f554ba42f46e8d207a35185f769a3ad..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/deeplake.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# Deeplake
-
-[Deeplake](https://www.deeplake.ai/) is a database optimized for AI and deep learning
-applications.
-
-
-## Installation and Setup
-
-```bash
-pip install langchain-deeplake
-```
-
-## Vector stores
-
-See detail on available vector stores
-[here](/docs/integrations/vectorstores/activeloop_deeplake).
diff --git a/langchain_md_files/integrations/providers/deepsparse.mdx b/langchain_md_files/integrations/providers/deepsparse.mdx
deleted file mode 100644
index 562d9e3e76512e3b717360e88172ddbca3f92877..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/deepsparse.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# DeepSparse
-
-This page covers how to use the [DeepSparse](https://github.com/neuralmagic/deepsparse) inference runtime within LangChain.
-It is broken into two parts: installation and setup, and then examples of DeepSparse usage.
-
-## Installation and Setup
-
-- Install the Python package with `pip install deepsparse`
-- Choose a [SparseZoo model](https://sparsezoo.neuralmagic.com/?useCase=text_generation) or export a supported model to ONNX [using Optimum](https://github.com/neuralmagic/notebooks/blob/main/notebooks/opt-text-generation-deepsparse-quickstart/OPT_Text_Generation_DeepSparse_Quickstart.ipynb)
-
-
-## LLMs
-
-There exists a DeepSparse LLM wrapper, which you can access with:
-
-```python
-from langchain_community.llms import DeepSparse
-```
-
-It provides a unified interface for all models:
-
-```python
-llm = DeepSparse(model='zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none')
-
-print(llm.invoke('def fib():'))
-```
-
-Additional parameters can be passed using the `config` parameter:
-
-```python
-config = {'max_generated_tokens': 256}
-
-llm = DeepSparse(model='zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none', config=config)
-```
diff --git a/langchain_md_files/integrations/providers/diffbot.mdx b/langchain_md_files/integrations/providers/diffbot.mdx
deleted file mode 100644
index 1a9e9934642f3ef383f30a660b9576511620b9ec..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/diffbot.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# Diffbot
-
-> [Diffbot](https://docs.diffbot.com/docs) is a suite of ML-based products that make it easy to structure and integrate web data.
-
-## Installation and Setup
-
-[Get a free Diffbot API token](https://app.diffbot.com/get-started/) and [follow these instructions](https://docs.diffbot.com/reference/authentication) to authenticate your requests.
-
-## Document Loader
-
-Diffbot's [Extract API](https://docs.diffbot.com/reference/extract-introduction) is a service that structures and normalizes data from web pages. 
-
-Unlike traditional web scraping tools, `Diffbot Extract` doesn't require any rules to read the content on a page. It uses a computer vision model to classify a page into one of 20 possible types, and then transforms raw HTML markup into JSON. The resulting structured JSON follows a consistent [type-based ontology](https://docs.diffbot.com/docs/ontology), which makes it easy to extract data from multiple different web sources with the same schema. 
-
-See a [usage example](/docs/integrations/document_loaders/diffbot).
-
-```python
-from langchain_community.document_loaders import DiffbotLoader
-```
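-
-A minimal sketch (the URL is illustrative; pass your Diffbot API token):
-
-```python
-import os
-
-from langchain_community.document_loaders import DiffbotLoader
-
-loader = DiffbotLoader(
-    urls=["https://www.diffbot.com/products/extract/"],
-    api_token=os.environ["DIFFBOT_API_TOKEN"],
-)
-documents = loader.load()
-```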
-
-## Graphs
-
-Diffbot's [Natural Language Processing API](https://www.diffbot.com/products/natural-language/) allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.
-
-See a [usage example](/docs/integrations/graphs/diffbot).
-
-```python
-from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
-```
diff --git a/langchain_md_files/integrations/providers/dingo.mdx b/langchain_md_files/integrations/providers/dingo.mdx
deleted file mode 100644
index b12a6a72cbc6c95cf58fbd4c72bda5831be302db..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dingo.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# DingoDB
-
->[DingoDB](https://github.com/dingodb) is a distributed multi-modal vector 
-> database. It combines the features of a data lake and a vector database, 
-> allowing for the storage of any type of data (key-value, PDF, audio, 
-> video, etc.) regardless of its size. Utilizing DingoDB, you can construct 
-> your own Vector Ocean (the next-generation data architecture following data 
-> warehouse and data lake). This enables 
-> the analysis of both structured and unstructured data through 
-> a singular SQL with exceptionally low latency in real time.
-
-## Installation and Setup
-
-Install the Python SDK
-
-```bash
-pip install dingodb
-```
-
-## VectorStore
-
-There exists a wrapper around DingoDB indexes, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-
-```python
-from langchain_community.vectorstores import Dingo
-```
-
-For a more detailed walkthrough of the DingoDB wrapper, see [this notebook](/docs/integrations/vectorstores/dingo)
diff --git a/langchain_md_files/integrations/providers/discord-shikenso.mdx b/langchain_md_files/integrations/providers/discord-shikenso.mdx
deleted file mode 100644
index 32b5a576cc7ce618b334fd51d1dbbc57f7a7a7f1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/discord-shikenso.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
-# Discord
-
-> [Discord](https://discord.com/) is an instant messaging, voice, and video communication platform widely used by communities of all types.
-
-## Installation and Setup
-
-Install the `langchain-discord-shikenso` package:
-
-```bash
-pip install langchain-discord-shikenso
-```
-
-You must provide a bot token via environment variable so the tools can authenticate with the Discord API:
-
-```bash
-export DISCORD_BOT_TOKEN="your-discord-bot-token"
-```
-
-If `DISCORD_BOT_TOKEN` is not set, the tools will raise a `ValueError` when instantiated.
-
----
-
-## Tools
-
-Below is a snippet showing how you can read and send messages in Discord. For more details, see the [documentation for Discord tools](/docs/integrations/tools/discord).
-
-```python
-from langchain_discord.tools.discord_read_messages import DiscordReadMessages
-from langchain_discord.tools.discord_send_messages import DiscordSendMessage
-
-# Create tool instances
-read_tool = DiscordReadMessages()
-send_tool = DiscordSendMessage()
-
-# Example: Read the last 3 messages from channel 1234567890
-read_result = read_tool({"channel_id": "1234567890", "limit": 3})
-print(read_result)
-
-# Example: Send a message to channel 1234567890
-send_result = send_tool({"channel_id": "1234567890", "message": "Hello from Markdown example!"})
-print(send_result)
-```
-
----
-
-## Toolkit
-
-`DiscordToolkit` groups multiple Discord-related tools into a single interface. For a usage example, see [the Discord toolkit docs](/docs/integrations/tools/discord).
-
-```python
-from langchain_discord.toolkits import DiscordToolkit
-
-toolkit = DiscordToolkit()
-tools = toolkit.get_tools()
-
-read_tool = tools[0]  # DiscordReadMessages
-send_tool = tools[1]  # DiscordSendMessage
-```
-
----
-
-## Future Integrations
-
-Additional integrations (e.g., document loaders, chat loaders) could be added for Discord.
-Check the [Discord Developer Docs](https://discord.com/developers/docs/intro) for more information, and watch for updates or advanced usage examples in the [langchain_discord GitHub repo](https://github.com/Shikenso-Analytics/langchain-discord).
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/discord.mdx b/langchain_md_files/integrations/providers/discord.mdx
deleted file mode 100644
index 17b87229031ab0054c96903feb0c987368e034bc..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/discord.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# Discord (community loader)
-
->[Discord](https://discord.com/) is a VoIP and instant messaging social platform. Users have the ability to communicate 
-> with voice calls, video calls, text messaging, media and files in private chats or as part of communities called 
-> "servers". A server is a collection of persistent chat rooms and voice channels which can be accessed via invite links.
-
-## Installation and Setup
-
-```bash
-pip install pandas
-```
-
-Follow these steps to download your `Discord` data:
-
-1. Go to your **User Settings**
-2. Then go to **Privacy and Safety**
-3. Head over to the **Request all of my Data** and click on **Request Data** button
-
-It might take up to 30 days for you to receive your data. You'll receive an email at the address registered
-with Discord. That email will contain a download button that you can use to download your personal Discord data.
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/discord).
-
-**NOTE:** The `DiscordChatLoader` is not a `ChatLoader` but a `DocumentLoader`.
-It is used to load the data from the `Discord` data dump.
-For the `ChatLoader` see Chat Loader section below.
-
-```python
-from langchain_community.document_loaders import DiscordChatLoader
-```
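-
-A minimal sketch, assuming the exported messages have already been read into a pandas DataFrame (the DataFrame below is illustrative; in practice, build it from the CSV files in your data dump):
-
-```python
-import pandas as pd
-
-from langchain_community.document_loaders import DiscordChatLoader
-
-# Illustrative chat log; the user_id_col value depends on your dump's columns.
-chat_log = pd.DataFrame({"ID": [1, 2], "Contents": ["hello", "world"]})
-
-loader = DiscordChatLoader(chat_log, user_id_col="ID")
-documents = loader.load()
-```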
-
-## Chat Loader
-
-See a [usage example](/docs/integrations/chat_loaders/discord).
-
diff --git a/langchain_md_files/integrations/providers/docarray.mdx b/langchain_md_files/integrations/providers/docarray.mdx
deleted file mode 100644
index d1d41a19834d1cd8bc1327e1840b60b739faa6ec..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/docarray.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# DocArray
-
-> [DocArray](https://docarray.jina.ai/) is a library for nested, unstructured, multimodal data in transit, 
-> including text, image, audio, video, 3D mesh, etc. It allows deep-learning engineers to efficiently process, 
-> embed, search, recommend, store, and transfer multimodal data with a Pythonic API.
-
-
-## Installation and Setup
-
-We need to install `docarray` python package.
-
-```bash
-pip install docarray
-```
-
-## Vector Store
-
-LangChain provides access to the `In-memory` and `HNSW` vector stores from the `DocArray` library.
-
-See a [usage example](/docs/integrations/vectorstores/docarray_hnsw).
-
-```python
-from langchain_community.vectorstores import DocArrayHnswSearch
-```
-See a [usage example](/docs/integrations/vectorstores/docarray_in_memory).
-
-```python
-from langchain_community.vectorstores import DocArrayInMemorySearch
-```
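-
-A minimal sketch of the in-memory variant (the embedding model and texts are illustrative):
-
-```python
-from langchain_community.vectorstores import DocArrayInMemorySearch
-from langchain_openai import OpenAIEmbeddings
-
-vectorstore = DocArrayInMemorySearch.from_texts(
-    ["DocArray handles multimodal data", "LangChain wraps DocArray as a vector store"],
-    embedding=OpenAIEmbeddings(),
-)
-docs = vectorstore.similarity_search("multimodal data")
-```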
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/docarray_retriever).
-
-```python
-from langchain_community.retrievers import DocArrayRetriever
-```
diff --git a/langchain_md_files/integrations/providers/docling.mdx b/langchain_md_files/integrations/providers/docling.mdx
deleted file mode 100644
index e7515221c9b46732b0071844131a45d677c8213e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/docling.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# Docling
-
-> [Docling](https://github.com/DS4SD/docling) parses PDF, DOCX, PPTX, HTML, and other formats into a rich unified representation including document layout, tables etc., making them ready for generative AI workflows like RAG.
->
-> This integration provides Docling's capabilities via the `DoclingLoader` document loader.
-
-## Installation and Setup
-
-Simply install `langchain-docling` from your package manager, e.g. pip:
-
-```shell
-pip install langchain-docling
-```
-
-## Document Loader
-
-The `DoclingLoader` class in `langchain-docling` seamlessly integrates Docling into
-LangChain, enabling you to:
-- use various document types in your LLM applications with ease and speed, and
-- leverage Docling's rich representation for advanced, document-native grounding.
-
-Basic usage looks as follows:
-
-```python
-from langchain_docling import DoclingLoader
-
-FILE_PATH = ["https://arxiv.org/pdf/2408.09869"]  # Docling Technical Report
-
-loader = DoclingLoader(file_path=FILE_PATH)
-
-docs = loader.load()
-```
-
-For end-to-end usage check out
-[this example](/docs/integrations/document_loaders/docling).
-
-## Additional Resources
-
-- [LangChain Docling integration GitHub](https://github.com/DS4SD/docling-langchain)
-- [LangChain Docling integration PyPI package](https://pypi.org/project/langchain-docling/)
-- [Docling GitHub](https://github.com/DS4SD/docling)
-- [Docling docs](https://ds4sd.github.io/docling/)
diff --git a/langchain_md_files/integrations/providers/doctran.mdx b/langchain_md_files/integrations/providers/doctran.mdx
deleted file mode 100644
index c85844766e1c732db5431410311caa2498230715..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/doctran.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# Doctran
-
->[Doctran](https://github.com/psychic-api/doctran) is a python package. It uses LLMs and open-source 
-> NLP libraries to transform raw text into clean, structured, information-dense documents 
-> that are optimized for vector space retrieval. You can think of `Doctran` as a black box where 
-> messy strings go in and nice, clean, labelled strings come out.
-
-
-## Installation and Setup
-
-```bash
-pip install doctran
-```
-
-## Document Transformers
-
-### Document Interrogator
-
-See a [usage example for DoctranQATransformer](/docs/integrations/document_transformers/doctran_interrogate_document).
-
-```python
-from langchain_community.document_transformers import DoctranQATransformer
-```
-### Property Extractor
-
-See a [usage example for DoctranPropertyExtractor](/docs/integrations/document_transformers/doctran_extract_properties).
-
-```python
-from langchain_community.document_transformers import DoctranPropertyExtractor
-```
-### Document Translator
-
-See a [usage example for DoctranTextTranslator](/docs/integrations/document_transformers/doctran_translate_document).
-
-```python
-from langchain_community.document_transformers import DoctranTextTranslator
-```
diff --git a/langchain_md_files/integrations/providers/docugami.mdx b/langchain_md_files/integrations/providers/docugami.mdx
deleted file mode 100644
index dcd0566c4a773deeb3382e68ad7ec2f4c489b17e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/docugami.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Docugami
-
->[Docugami](https://docugami.com) converts business documents into a Document XML Knowledge Graph, generating forests 
-> of XML semantic trees representing entire documents. This is a rich representation that includes the semantic and 
-> structural characteristics of various chunks in the document as an XML tree.
-
-## Installation and Setup
-
-
-```bash
-pip install dgml-utils
-pip install docugami-langchain
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/docugami).
-
-```python
-from docugami_langchain.document_loaders import DocugamiLoader
-```
diff --git a/langchain_md_files/integrations/providers/docusaurus.mdx b/langchain_md_files/integrations/providers/docusaurus.mdx
deleted file mode 100644
index e137d627724c031f662ad7762383257fc12a823d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/docusaurus.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Docusaurus
-
->[Docusaurus](https://docusaurus.io/) is a static-site generator which provides 
-> out-of-the-box documentation features.
- 
-
-## Installation and Setup
-
-
-```bash
-pip install -U beautifulsoup4 lxml
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/docusaurus).
-
-```python
-from langchain_community.document_loaders import DocusaurusLoader
-```
diff --git a/langchain_md_files/integrations/providers/dria.mdx b/langchain_md_files/integrations/providers/dria.mdx
deleted file mode 100644
index 7e3c5cdbace43908d8ad5d9d0ffbc399009c86b8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dria.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Dria
-
->[Dria](https://dria.co/) is a hub of public RAG models for developers to 
-> both contribute and utilize a shared embedding lake.
-
-See more details about the LangChain integration with Dria 
-at [this page](https://dria.co/docs/integrations/langchain).
-
-## Installation and Setup
-
-You have to install a python package:
-
-```bash
-pip install dria
-```
-
-You have to get an API key from Dria. You can get it by signing up at [Dria](https://dria.co/).
-
-## Retrievers
-
-See a [usage example](/docs/integrations/retrievers/dria_index).
-
-```python
-from langchain_community.retrievers import DriaRetriever
-```
diff --git a/langchain_md_files/integrations/providers/dropbox.mdx b/langchain_md_files/integrations/providers/dropbox.mdx
deleted file mode 100644
index 590a58b9a681a714b7642e3790f95624ea47ca3b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/dropbox.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Dropbox
-
->[Dropbox](https://en.wikipedia.org/wiki/Dropbox) is a file hosting service that brings everything together
-> in one place: traditional files, cloud content, and web shortcuts.
- 
-
-## Installation and Setup
-
-See the detailed [installation guide](/docs/integrations/document_loaders/dropbox#prerequisites).
-
-```bash
-pip install -U dropbox
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/dropbox).
-
-```python
-from langchain_community.document_loaders import DropboxLoader
-```
diff --git a/langchain_md_files/integrations/providers/duckdb.mdx b/langchain_md_files/integrations/providers/duckdb.mdx
deleted file mode 100644
index f965e129b9536d10bd25750b2b0e10fcd5cdb411..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/duckdb.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# DuckDB
-
->[DuckDB](https://duckdb.org/) is an in-process SQL OLAP database management system.
-
-## Installation and Setup
-
-First, you need to install `duckdb` python package.
-
-```bash
-pip install duckdb
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/duckdb).
-
-```python
-from langchain_community.document_loaders import DuckDBLoader
-```
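-
-A minimal sketch, where each row returned by the query becomes one document (the query is illustrative):
-
-```python
-from langchain_community.document_loaders import DuckDBLoader
-
-# Runs against an in-memory DuckDB database by default.
-loader = DuckDBLoader("SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4")
-documents = loader.load()
-print(documents)
-```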
diff --git a/langchain_md_files/integrations/providers/duckduckgo_search.mdx b/langchain_md_files/integrations/providers/duckduckgo_search.mdx
deleted file mode 100644
index 29ab01981f45fb30d346e1b1d75759fd9dce408b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/duckduckgo_search.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# DuckDuckGo Search
-
->[DuckDuckGo Search](https://github.com/deedy5/duckduckgo_search) is a package that
-> searches for words, documents, images, videos, news, maps and text
-> translation using the `DuckDuckGo.com` search engine. It can also download files
-> and images to a local hard drive.
-
-## Installation and Setup
-
-You have to install a python package:
-
-```bash
-pip install duckduckgo-search
-```
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/ddg).
-
-There are two tools available:
-
-```python
-from langchain_community.tools import DuckDuckGoSearchRun
-from langchain_community.tools import DuckDuckGoSearchResults
-```
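-
-A minimal sketch of running a search (no API key is required):
-
-```python
-from langchain_community.tools import DuckDuckGoSearchRun
-
-search = DuckDuckGoSearchRun()
-print(search.invoke("Obama's first name?"))
-```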
diff --git a/langchain_md_files/integrations/providers/e2b.mdx b/langchain_md_files/integrations/providers/e2b.mdx
deleted file mode 100644
index ee0ca085aa440a5569b92b35719af1887e43dd30..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/e2b.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# E2B
-
->[E2B](https://e2b.dev/) provides open-source secure sandboxes 
-> for AI-generated code execution. See more [here](https://github.com/e2b-dev).
-
-## Installation and Setup
-
-You have to install a python package:
-
-```bash
-pip install e2b_code_interpreter
-```
-
-## Tool
-
-See a [usage example](/docs/integrations/tools/e2b_data_analysis).
-
-```python
-from langchain_community.tools import E2BDataAnalysisTool
-```
diff --git a/langchain_md_files/integrations/providers/edenai.mdx b/langchain_md_files/integrations/providers/edenai.mdx
deleted file mode 100644
index a33e92ec6a93c9ea45b50063879fc04d97462104..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/edenai.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
-# Eden AI
-
->The [Eden AI](https://docs.edenai.co/docs/getting-started-with-eden-ai) user interface (UI)
-> is designed for handling AI projects. With the `Eden AI Portal`,
-> you can perform no-code AI using the best engines on the market.
-
-
-## Installation and Setup
-
-Accessing the Eden AI API requires an API key, which you can get by 
-[creating an account](https://app.edenai.run/user/register) and 
-heading [here](https://app.edenai.run/admin/account/settings). 
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/edenai).
-
-```python
-from langchain_community.llms import EdenAI
-
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/edenai).
-
-```python
-from langchain_community.chat_models.edenai import ChatEdenAI
-```
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/edenai).
-
-```python
-from langchain_community.embeddings.edenai import EdenAiEmbeddings
-```
-
-## Tools
-
-Eden AI provides a list of tools that grants your Agent the ability to do multiple tasks, such as:
-* speech to text
-* text to speech
-* text explicit content detection
-* image explicit content detection
-* object detection
-* OCR invoice parsing
-* OCR ID parsing
-
-See a [usage example](/docs/integrations/tools/edenai_tools).
-
-```python
-from langchain_community.tools.edenai import (
-    EdenAiExplicitImageTool,
-    EdenAiObjectDetectionTool,
-    EdenAiParsingIDTool,
-    EdenAiParsingInvoiceTool,
-    EdenAiSpeechToTextTool,
-    EdenAiTextModerationTool,
-    EdenAiTextToSpeechTool,
-)
-```
diff --git a/langchain_md_files/integrations/providers/elasticsearch.mdx b/langchain_md_files/integrations/providers/elasticsearch.mdx
deleted file mode 100644
index 734ef9d46ce8ecfbb3c39f707bb7c49a4f7fb78c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/elasticsearch.mdx
+++ /dev/null
@@ -1,108 +0,0 @@
-# Elasticsearch
-
-> [Elasticsearch](https://www.elastic.co/elasticsearch/) is a distributed, RESTful search and analytics engine.
-> It provides a distributed, multi-tenant-capable full-text search engine with an HTTP web interface and schema-free
-> JSON documents.
-
-## Installation and Setup
-
-### Setup Elasticsearch
-
-There are two ways to get started with Elasticsearch:
-
-#### Install Elasticsearch on your local machine via Docker
-
-Example: Run a single-node Elasticsearch instance with security disabled. 
-This is not recommended for production use.
-
-```bash
-    docker run -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" docker.elastic.co/elasticsearch/elasticsearch:8.9.0
-```
-
-#### Deploy Elasticsearch on Elastic Cloud
-
-`Elastic Cloud` is a managed Elasticsearch service. Sign up for a [free trial](https://cloud.elastic.co/registration?utm_source=langchain&utm_content=documentation).
-
-### Install Client
-
-```bash
-pip install elasticsearch
-pip install langchain-elasticsearch
-```
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/elasticsearch).
-
-```python
-from langchain_elasticsearch import ElasticsearchEmbeddings
-```
-
-## Vector store
-
-See a [usage example](/docs/integrations/vectorstores/elasticsearch).
-
-```python
-from langchain_elasticsearch import ElasticsearchStore
-```
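-
-A minimal sketch of connecting the store to a local instance (the index name and embedding model are illustrative):
-
-```python
-from langchain_elasticsearch import ElasticsearchStore
-from langchain_openai import OpenAIEmbeddings
-
-# Assumes a local Elasticsearch instance started as shown above.
-vector_store = ElasticsearchStore(
-    es_url="http://localhost:9200",
-    index_name="langchain_index",
-    embedding=OpenAIEmbeddings(),
-)
-```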
-
-### Third-party integrations
-
-#### EcloudESVectorStore
-
-```python
-from langchain_community.vectorstores.ecloud_vector_search import EcloudESVectorStore
-```
-
-## Retrievers
-
-### ElasticsearchRetriever
-
-The `ElasticsearchRetriever` enables flexible access to all Elasticsearch features 
-through the Query DSL. 
-
-See a [usage example](/docs/integrations/retrievers/elasticsearch_retriever).
-
-```python
-from langchain_elasticsearch import ElasticsearchRetriever
-```
-
-### BM25
-
-See a [usage example](/docs/integrations/retrievers/elastic_search_bm25).
-
-```python
-from langchain_community.retrievers import ElasticSearchBM25Retriever
-```
-## Memory
-
-See a [usage example](/docs/integrations/memory/elasticsearch_chat_message_history).
-
-```python
-from langchain_elasticsearch import ElasticsearchChatMessageHistory
-```
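-
-A minimal sketch (the index and session id are illustrative):
-
-```python
-from langchain_elasticsearch import ElasticsearchChatMessageHistory
-
-history = ElasticsearchChatMessageHistory(
-    es_url="http://localhost:9200",
-    index="chat_history",
-    session_id="test-session",
-)
-history.add_user_message("hi!")
-```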
-
-## LLM cache
-
-See a [usage example](/docs/integrations/llm_caching/#elasticsearch-caches).
-
-```python
-from langchain_elasticsearch import ElasticsearchCache
-```
-
-## Byte Store
-
-See a [usage example](/docs/integrations/stores/elasticsearch).
-
-```python
-from langchain_elasticsearch import ElasticsearchEmbeddingsCache
-```
-
-## Chain
-
-This is a chain for interacting with an Elasticsearch database.
-
-```python
-from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain
-```
-
diff --git a/langchain_md_files/integrations/providers/elevenlabs.mdx b/langchain_md_files/integrations/providers/elevenlabs.mdx
deleted file mode 100644
index 563527304789d3774c508c7efb1b3fee2d61b194..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/elevenlabs.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
-# ElevenLabs
-
->[ElevenLabs](https://elevenlabs.io/about) is a voice AI research & deployment company 
-> with a mission to make content universally accessible in any language & voice.
->
->`ElevenLabs` creates the most realistic, versatile and contextually-aware 
-> AI audio, providing the ability to generate speech in hundreds of 
-> new and existing voices in 29 languages.
-
-## Installation and Setup
-
-First, you need to set up an ElevenLabs account. You can follow the 
-[instructions here](https://docs.elevenlabs.io/welcome/introduction).
-
-Install the Python package:
-
-```bash
-pip install elevenlabs
-```
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/eleven_labs_tts).
-
-```python
-from langchain_community.tools import ElevenLabsText2SpeechTool
-```
diff --git a/langchain_md_files/integrations/providers/embedchain.mdx b/langchain_md_files/integrations/providers/embedchain.mdx
deleted file mode 100644
index 9078c6ab71d4dcf0990aa3a88aff0abd8ae1498e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/embedchain.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Embedchain
-
-> [Embedchain](https://github.com/embedchain/embedchain) is a RAG framework to create 
-> data pipelines. It loads, indexes, retrieves and syncs all the data.
->
->It is available as an [open source package](https://github.com/embedchain/embedchain) 
-> and as a [hosted platform solution](https://app.embedchain.ai/).
- 
-
-## Installation and Setup
-
-Install the package using pip:
-
-```bash
-pip install embedchain
-```
-
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/embedchain).
-
-```python
-from langchain_community.retrievers import EmbedchainRetriever
-```
diff --git a/langchain_md_files/integrations/providers/epsilla.mdx b/langchain_md_files/integrations/providers/epsilla.mdx
deleted file mode 100644
index 78da4d6a984b4e6b7d5b74e815c065753c9cd825..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/epsilla.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# Epsilla
-
-This page covers how to use [Epsilla](https://github.com/epsilla-cloud/vectordb) within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Epsilla wrappers.
-
-## Installation and Setup
-
-- Install the Python SDK with `pip/pip3 install pyepsilla`
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around Epsilla vector databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-
-```python
-from langchain_community.vectorstores import Epsilla
-```
-
-For a more detailed walkthrough of the Epsilla wrapper, see [this notebook](/docs/integrations/vectorstores/epsilla)
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/etherscan.mdx b/langchain_md_files/integrations/providers/etherscan.mdx
deleted file mode 100644
index cc4e197b2899e0ba573982158c493a56915199ca..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/etherscan.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# Etherscan
-
->[Etherscan](https://docs.etherscan.io/) is the leading blockchain explorer, 
-> search, API and analytics platform for `Ethereum`, a decentralized smart contracts platform.
- 
-
-## Installation and Setup
-
-See the detailed [installation guide](/docs/integrations/document_loaders/etherscan).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/etherscan).
-
-```python
-from langchain_community.document_loaders import EtherscanLoader
-```
diff --git a/langchain_md_files/integrations/providers/everlyai.mdx b/langchain_md_files/integrations/providers/everlyai.mdx
deleted file mode 100644
index 2ec507030190485515356f63e0654af35080f798..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/everlyai.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Everly AI
-
-> [Everly AI](https://everlyai.xyz/) allows you to run your ML models at scale in the cloud. 
-> It also provides API access to [several LLM models](https://everlyai.xyz/).
-
-## Installation and Setup
-
-To use `Everly AI`, you will need an API key. Visit 
-[Everly AI](https://everlyai.xyz/) to create an API key in your profile.
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/everlyai).
-
-```python
-from langchain_community.chat_models import ChatEverlyAI
-```
diff --git a/langchain_md_files/integrations/providers/evernote.mdx b/langchain_md_files/integrations/providers/evernote.mdx
deleted file mode 100644
index a58c3fc0cf7cd62f5541fb37e865f34388638718..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/evernote.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# EverNote
-
->[EverNote](https://evernote.com/) is intended for archiving and creating notes in which photos, audio and saved web content can be embedded. Notes are stored in virtual "notebooks" and can be tagged, annotated, edited, searched, and exported.
-
-## Installation and Setup
-
-First, you need to install `lxml` and `html2text` python packages.
-
-```bash
-pip install lxml
-pip install html2text
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/evernote).
-
-```python
-from langchain_community.document_loaders import EverNoteLoader
-```
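-
-A minimal sketch, loading an exported `.enex` notebook as a single document (the file path is illustrative):
-
-```python
-from langchain_community.document_loaders import EverNoteLoader
-
-loader = EverNoteLoader("./my_notebook.enex", load_single_document=True)
-documents = loader.load()
-```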
diff --git a/langchain_md_files/integrations/providers/facebook.mdx b/langchain_md_files/integrations/providers/facebook.mdx
deleted file mode 100644
index 6734c9462e5bb34bd1ae22cdcdcdddc40869e730..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/facebook.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
-# Facebook - Meta
-
->[Meta Platforms, Inc.](https://www.facebook.com/), doing business as `Meta`, formerly 
-> named `Facebook, Inc.`, and `TheFacebook, Inc.`, is an American multinational technology 
-> conglomerate. The company owns and operates `Facebook`, `Instagram`, `Threads`, 
-> and `WhatsApp`, among other products and services.
- 
-## Embedding models
-
-### LASER
-
->[LASER](https://github.com/facebookresearch/LASER) is a Python library developed by 
-> the `Meta AI Research` team and used for 
-> creating multilingual sentence embeddings for 
-> [over 147 languages as of 2/25/2024](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) 
-
-```bash
-pip install laser_encoders
-```
-
-See a [usage example](/docs/integrations/text_embedding/laser).
-
-```python
-from langchain_community.embeddings.laser import LaserEmbeddings
-```
-
-## Document loaders
-
-### Facebook Messenger
-
->[Messenger](https://en.wikipedia.org/wiki/Messenger_(software)) is an instant messaging app and 
-> platform developed by `Meta Platforms`. Originally launched as `Facebook Chat` in 2008, the service was
-> revamped by the company in 2010.
-
-See a [usage example](/docs/integrations/document_loaders/facebook_chat).
-
-```python
-from langchain_community.document_loaders import FacebookChatLoader
-```
-
-## Vector stores
-
-### Facebook Faiss
-
->[Facebook AI Similarity Search (Faiss)](https://engineering.fb.com/2017/03/29/data-infrastructure/faiss-a-library-for-efficient-similarity-search/) 
-> is a library for efficient similarity search and clustering of dense vectors. It contains algorithms that 
-> search in sets of vectors of any size, up to ones that possibly do not fit in RAM. It also contains supporting 
-> code for evaluation and parameter tuning.
-
-[Faiss documentation](https://faiss.ai/).
-
-We need to install `faiss` python package.
-
-```bash
-pip install faiss-gpu # For CUDA 7.5+ supported GPUs.
-```
-
-OR
-
-```bash
-pip install faiss-cpu # For CPU Installation
-```
-
-See a [usage example](/docs/integrations/vectorstores/faiss).
-
-```python
-from langchain_community.vectorstores import FAISS
-```
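-
-A minimal sketch of building and querying an in-memory index (the `FakeEmbeddings` placeholder is used only to keep the example self-contained; any embeddings model works here):
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores import FAISS
-
-texts = ["LangChain integrates with Faiss", "Faiss performs similarity search"]
-vectorstore = FAISS.from_texts(texts, FakeEmbeddings(size=256))
-
-# Retrieve the most similar document for a query.
-results = vectorstore.similarity_search("vector similarity search", k=1)
-print(results[0].page_content)
-```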
-
-## Chat loaders
-
-### Facebook Messenger
-
->[Messenger](https://en.wikipedia.org/wiki/Messenger_(software)) is an instant messaging app and 
-> platform developed by `Meta Platforms`. Originally launched as `Facebook Chat` in 2008, the service was
-> revamped by the company in 2010.
-
-See a [usage example](/docs/integrations/chat_loaders/facebook).
-
-```python
-from langchain_community.chat_loaders.facebook_messenger import (
-    FolderFacebookMessengerChatLoader,
-    SingleFileFacebookMessengerChatLoader,
-)
-```
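-
-A minimal sketch of loading chat sessions from a single exported conversation (the `path` argument and file name are illustrative assumptions about the loader's interface):
-
-```python
-from langchain_community.chat_loaders.facebook_messenger import (
-    SingleFileFacebookMessengerChatLoader,
-)
-
-# Point the loader at one conversation exported from Facebook Messenger.
-loader = SingleFileFacebookMessengerChatLoader(path="./example_data/messages.json")
-chat_sessions = loader.load()
-```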
-
-### Facebook WhatsApp
-
-See a [usage example](/docs/integrations/chat_loaders/whatsapp).
-
-```python
-from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader
-```
diff --git a/langchain_md_files/integrations/providers/fauna.mdx b/langchain_md_files/integrations/providers/fauna.mdx
deleted file mode 100644
index 252c0101d2e7c1d1f6c130ad5f3f36f6e97024ee..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/fauna.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Fauna
-
->[Fauna](https://fauna.com/) is a distributed document-relational database 
-> that combines the flexibility of documents with the power of a relational, 
-> ACID compliant database that scales across regions, clouds or the globe.
- 
-
-## Installation and Setup
-
-We have to get the secret key.
-See the detailed [guide](https://docs.fauna.com/fauna/current/learn/security_model/).
-
-We have to install the `fauna` package.
-
-```bash
-pip install -U fauna
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/fauna).
-
-```python
-from langchain_community.document_loaders.fauna import FaunaLoader
-```
diff --git a/langchain_md_files/integrations/providers/figma.mdx b/langchain_md_files/integrations/providers/figma.mdx
deleted file mode 100644
index d907a4814118f0156ba4db35be13b17de80d15ec..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/figma.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Figma
-
->[Figma](https://www.figma.com/) is a collaborative web application for interface design.
-
-## Installation and Setup
-
-The Figma API requires an `access token`, `node_ids`, and a `file key`.
-
-The `file key` can be pulled from the URL.  https://www.figma.com/file/\{filekey\}/sampleFilename
-
-`Node IDs` are also available in the URL. Click on anything and look for the '?node-id=\{node_id\}' param.
-
-`Access token` [instructions](https://help.figma.com/hc/en-us/articles/8085703771159-Manage-personal-access-tokens).
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/figma).
-
-```python
-from langchain_community.document_loaders import FigmaFileLoader
-```
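-
-A minimal sketch that ties these values together (the argument names are assumptions based on the loader's constructor; the token, node IDs, and file key below are placeholders):
-
-```python
-from langchain_community.document_loaders import FigmaFileLoader
-
-loader = FigmaFileLoader(
-    access_token="<your_personal_access_token>",
-    ids="<comma_separated_node_ids>",
-    key="<your_file_key>",
-)
-docs = loader.load()
-```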
diff --git a/langchain_md_files/integrations/providers/firecrawl.mdx b/langchain_md_files/integrations/providers/firecrawl.mdx
deleted file mode 100644
index 745c7637463e2993808ae56b8bf5aeb984877d95..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/firecrawl.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# FireCrawl
-
->[FireCrawl](https://firecrawl.dev/?ref=langchain) crawls and converts any website into LLM-ready data. 
-> It crawls all accessible subpages and gives you clean markdown 
-> and metadata for each. No sitemap required.
-
-
-## Installation and Setup
-
-Install the python SDK:
-
-```bash
-pip install firecrawl-py==0.0.20
-```
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/firecrawl).
-
-```python
-from langchain_community.document_loaders import FireCrawlLoader
-```
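-
-A minimal sketch of scraping a single page (the `url`, `api_key`, and `mode` arguments are assumptions about the loader's interface; replace the key with your own):
-
-```python
-from langchain_community.document_loaders import FireCrawlLoader
-
-loader = FireCrawlLoader(
-    url="https://firecrawl.dev",
-    api_key="<your_firecrawl_api_key>",
-    mode="scrape",  # "scrape" a single page; "crawl" all accessible subpages
-)
-docs = loader.load()
-```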
diff --git a/langchain_md_files/integrations/providers/flyte.mdx b/langchain_md_files/integrations/providers/flyte.mdx
deleted file mode 100644
index 5fe20d896517cc01eaf62f85eb6c33e7cdcb7b46..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/flyte.mdx
+++ /dev/null
@@ -1,153 +0,0 @@
-# Flyte
-
-> [Flyte](https://github.com/flyteorg/flyte) is an open-source orchestrator that facilitates building production-grade data and ML pipelines.
-> It is built for scalability and reproducibility, leveraging Kubernetes as its underlying platform.
-
-The purpose of this guide is to demonstrate the integration of a `FlyteCallback` into your Flyte task, enabling you to effectively monitor and track your LangChain experiments.
-
-## Installation & Setup
-
-- Install the Flytekit library by running the command `pip install flytekit`.
-- Install the Flytekit-Envd plugin by running the command `pip install flytekitplugins-envd`.
-- Install LangChain by running the command `pip install langchain`.
-- Install [Docker](https://docs.docker.com/engine/install/) on your system.
-
-## Flyte Tasks
-
-A Flyte [task](https://docs.flyte.org/en/latest/user_guide/basics/tasks.html) serves as the foundational building block of Flyte.
-To execute LangChain experiments, you need to write Flyte tasks that define the specific steps and operations involved.
-
-NOTE: The [getting started guide](https://docs.flyte.org/projects/cookbook/en/latest/index.html) offers detailed, step-by-step instructions on installing Flyte locally and running your initial Flyte pipeline.
-
-First, import the necessary dependencies to support your LangChain experiments.
-
-```python
-import os
-
-from flytekit import ImageSpec, task
-from langchain.agents import AgentType, initialize_agent, load_tools
-from langchain.callbacks import FlyteCallbackHandler
-from langchain.chains import LLMChain
-from langchain_openai import ChatOpenAI
-from langchain_core.prompts import PromptTemplate
-from langchain_core.messages import HumanMessage
-```
-
-Set up the necessary environment variables to utilize the OpenAI API and Serp API:
-
-```python
-# Set OpenAI API key
-os.environ["OPENAI_API_KEY"] = "<your_openai_api_key>"
-
-# Set Serp API key
-os.environ["SERPAPI_API_KEY"] = "<your_serp_api_key>"
-```
-
-Replace `<your_openai_api_key>` and `<your_serp_api_key>` with your respective API keys obtained from OpenAI and Serp API.
-
-To guarantee reproducibility of your pipelines, Flyte tasks are containerized.
-Each Flyte task must be associated with an image, which can either be shared across the entire Flyte [workflow](https://docs.flyte.org/en/latest/user_guide/basics/workflows.html) or provided separately for each task.
-
-To streamline the process of supplying the required dependencies for each Flyte task, you can initialize an [`ImageSpec`](https://docs.flyte.org/en/latest/user_guide/customizing_dependencies/imagespec.html) object.
-This approach automatically triggers a Docker build, alleviating the need for users to manually create a Docker image.
-
-```python
-custom_image = ImageSpec(
-    name="langchain-flyte",
-    packages=[
-        "langchain",
-        "openai",
-        "spacy",
-        "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz",
-        "textstat",
-        "google-search-results",
-    ],
-    registry="<your-registry>",
-)
-```
-
-You have the flexibility to push the Docker image to a registry of your preference.
-[Docker Hub](https://hub.docker.com/) or [GitHub Container Registry (GHCR)](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry) is a convenient option to begin with.
-
-Once you have selected a registry, you can proceed to create Flyte tasks that log the LangChain metrics to Flyte Deck.
-
-The following examples demonstrate tasks related to OpenAI LLM, chains and agent with tools:
-
-### LLM
-
-```python
-@task(disable_deck=False, container_image=custom_image)
-def langchain_llm() -> str:
-    llm = ChatOpenAI(
-        model_name="gpt-3.5-turbo",
-        temperature=0.2,
-        callbacks=[FlyteCallbackHandler()],
-    )
-    return llm.invoke([HumanMessage(content="Tell me a joke")]).content
-```
-
-### Chain
-
-```python
-@task(disable_deck=False, container_image=custom_image)
-def langchain_chain() -> list[dict[str, str]]:
-    template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
-Title: {title}
-Playwright: This is a synopsis for the above play:"""
-    llm = ChatOpenAI(
-        model_name="gpt-3.5-turbo",
-        temperature=0,
-        callbacks=[FlyteCallbackHandler()],
-    )
-    prompt_template = PromptTemplate(input_variables=["title"], template=template)
-    synopsis_chain = LLMChain(
-        llm=llm, prompt=prompt_template, callbacks=[FlyteCallbackHandler()]
-    )
-    test_prompts = [
-        {
-            "title": "documentary about good video games that push the boundary of game design"
-        },
-    ]
-    return synopsis_chain.apply(test_prompts)
-```
-
-### Agent
-
-```python
-@task(disable_deck=False, container_image=custom_image)
-def langchain_agent() -> str:
-    llm = ChatOpenAI(
-        model_name="gpt-3.5-turbo",
-        temperature=0,
-        callbacks=[FlyteCallbackHandler()],
-    )
-    tools = load_tools(
-        ["serpapi", "llm-math"], llm=llm, callbacks=[FlyteCallbackHandler()]
-    )
-    agent = initialize_agent(
-        tools,
-        llm,
-        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
-        callbacks=[FlyteCallbackHandler()],
-        verbose=True,
-    )
-    return agent.run(
-        "Who is Leonardo DiCaprio's girlfriend? Could you calculate her current age and raise it to the power of 0.43?"
-    )
-```
-
-These tasks serve as a starting point for running your LangChain experiments within Flyte.
-
-## Execute the Flyte Tasks on Kubernetes
-
-To execute the Flyte tasks on the configured Flyte backend, use the following command:
-
-```bash
-pyflyte run --image <your-image> langchain_flyte.py langchain_llm
-```
-
-This command will initiate the execution of the `langchain_llm` task on the Flyte backend. You can trigger the remaining two tasks in a similar manner.
-
-The metrics will be displayed on the Flyte UI as follows:
-
-![Screenshot of Flyte Deck showing LangChain metrics and a dependency tree visualization.](https://ik.imagekit.io/c8zl7irwkdda/Screenshot_2023-06-20_at_1.23.29_PM_MZYeG0dKa.png?updatedAt=1687247642993 "Flyte Deck Metrics Display")
diff --git a/langchain_md_files/integrations/providers/fmp-data.mdx b/langchain_md_files/integrations/providers/fmp-data.mdx
deleted file mode 100644
index b2ab522fb03f396dbd7ddaa1c128b6136416feb8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/fmp-data.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# FMP Data (Financial Modeling Prep)
-
-> [FMP-Data](https://pypi.org/project/fmp-data/) is a python package for connecting to
-> the Financial Modeling Prep API. It simplifies how you can access production-quality data.
-
-
-## Installation and Setup
-
-Get an `FMP Data` API key by
-visiting [this page](https://site.financialmodelingprep.com/pricing-plans?couponCode=mehdi)
-and set it as an environment variable (`FMP_API_KEY`).
-
-Then, install [langchain-fmp-data](https://pypi.org/project/langchain-fmp-data/).
-
-## Tools
-
-See an [example](https://github.com/MehdiZare/langchain-fmp-data/tree/main/docs).
-
-```python
-from langchain_fmp_data import FMPDataTool, FMPDataToolkit
-```
diff --git a/langchain_md_files/integrations/providers/forefrontai.mdx b/langchain_md_files/integrations/providers/forefrontai.mdx
deleted file mode 100644
index 4d447ee37a6223bf0ba94fb7502a221879f88344..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/forefrontai.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Forefront AI
-
-> [Forefront AI](https://forefront.ai/) is a platform enabling you to
-> fine-tune and run inference on open-source text generation models.
-
-
-## Installation and Setup
-
-Get a `ForefrontAI` API key by
-visiting [this page](https://accounts.forefront.ai/sign-in?redirect_url=https%3A%2F%2Fforefront.ai%2Fapp%2Fapi-keys)
-and set it as an environment variable (`FOREFRONTAI_API_KEY`).
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/forefrontai).
-
-```python
-from langchain_community.llms import ForefrontAI
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/friendli.mdx b/langchain_md_files/integrations/providers/friendli.mdx
deleted file mode 100644
index 2e8fda9ade2f6e1b5e2eab59f03e323fa2cab262..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/friendli.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
-# Friendli AI
-
-> [FriendliAI](https://friendli.ai/) enhances AI application performance and optimizes 
-> cost savings with scalable, efficient deployment options, tailored for high-demand AI workloads.
-
-## Installation and setup
-
-Install the `friendli-client` python package.
-
-```bash
-pip install -U langchain_community friendli-client
-```
-
-Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token, 
-and set it as the `FRIENDLI_TOKEN` environment variable.
-
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/friendli).
-
-```python
-from langchain_community.chat_models.friendli import ChatFriendli
-
-chat = ChatFriendli(model='meta-llama-3.1-8b-instruct')
-
-for m in chat.stream("Tell me fun things to do in NYC"):
-    print(m.content, end="", flush=True)
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/friendli).
-
-```python
-from langchain_community.llms.friendli import Friendli
-
-llm = Friendli(model='meta-llama-3.1-8b-instruct')
-
-print(llm.invoke("def bubble_sort(): "))
-```
diff --git a/langchain_md_files/integrations/providers/geopandas.mdx b/langchain_md_files/integrations/providers/geopandas.mdx
deleted file mode 100644
index c14a29c40bd2f7395269e527591acd5bef4c2b98..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/geopandas.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# Geopandas
-
->[GeoPandas](https://geopandas.org/) is an open source project to make working 
-> with geospatial data in python easier. `GeoPandas` extends the datatypes used by 
-> `pandas` to allow spatial operations on geometric types. 
-> Geometric operations are performed by `shapely`.
- 
-
-## Installation and Setup
-
-We have to install several python packages.
-
-```bash
-pip install -U sodapy pandas geopandas
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/geopandas).
-
-```python
-from langchain_community.document_loaders import OpenCityDataLoader
-```
diff --git a/langchain_md_files/integrations/providers/git.mdx b/langchain_md_files/integrations/providers/git.mdx
deleted file mode 100644
index bc20c1710ca7a1be1e6d613f60ce1423345ad471..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/git.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Git
-
->[Git](https://en.wikipedia.org/wiki/Git) is a distributed version control system that tracks changes in any set of computer files, usually used for coordinating work among programmers collaboratively developing source code during software development.
-
-## Installation and Setup
-
-First, you need to install `GitPython` python package.
-
-```bash
-pip install GitPython
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/git).
-
-```python
-from langchain_community.document_loaders import GitLoader
-```
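-
-A minimal sketch of loading files from a local clone (the repository path and branch are illustrative; a `clone_url` argument can typically be used to clone a remote repository first):
-
-```python
-from langchain_community.document_loaders import GitLoader
-
-# Load all files tracked on the given branch of a local repository.
-loader = GitLoader(repo_path="./example_repo", branch="main")
-docs = loader.load()
-```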
diff --git a/langchain_md_files/integrations/providers/gitbook.mdx b/langchain_md_files/integrations/providers/gitbook.mdx
deleted file mode 100644
index 4c8a8559234ee81ca4341d33712183306e8710ea..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gitbook.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# GitBook
-
->[GitBook](https://docs.gitbook.com/) is a modern documentation platform where teams can document everything from products to internal knowledge bases and APIs.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/gitbook).
-
-```python
-from langchain_community.document_loaders import GitbookLoader
-```
diff --git a/langchain_md_files/integrations/providers/github.mdx b/langchain_md_files/integrations/providers/github.mdx
deleted file mode 100644
index 0b8c369198cad08cd467100e208dabb3c9b22116..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/github.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
-# GitHub
-
->[GitHub](https://github.com/) is a developer platform that allows developers to create, 
-> store, manage and share their code. It uses `Git` software, providing the 
-> distributed version control of Git plus access control, bug tracking, 
-> software feature requests, task management, continuous integration, and wikis for every project.
- 
-
-## Installation and Setup
-
-To access the GitHub API, you need a [personal access token](https://github.com/settings/tokens).
-
-
-## Document Loader
-
-There are two document loaders available for GitHub.
-
-See a [usage example](/docs/integrations/document_loaders/github).
-
-```python
-from langchain_community.document_loaders import GitHubIssuesLoader, GithubFileLoader
-```
-
-## Tools/Toolkit
-
-### GitHubToolkit
-The `GitHub` toolkit contains tools that enable an LLM agent to interact 
-with a GitHub repository. 
-
-The toolkit is a wrapper for the `PyGitHub` library.
-
-```python
-from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
-```
-
-Learn more in the [example notebook](/docs/integrations/tools/github).
-
-### GitHubAction
-
-Tool for interacting with the GitHub API.
-
-```python
-from langchain_community.tools.github.tool import GitHubAction
-```
-
diff --git a/langchain_md_files/integrations/providers/gitlab.mdx b/langchain_md_files/integrations/providers/gitlab.mdx
deleted file mode 100644
index 5aab6d547a2af6548de4c79197c80a6a6b06a0f8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gitlab.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# GitLab
-
->[GitLab Inc.](https://about.gitlab.com/) is an open-core company 
-> that operates `GitLab`, a DevOps software package that can develop, 
-> secure, and operate software. `GitLab` includes a distributed version 
-> control based on Git, including features such as access control, bug tracking,
-> software feature requests, task management, and wikis for every project, 
-> as well as snippets. 
-
-
-## Tools/Toolkits
-
-### GitLabToolkit
-
-The `Gitlab` toolkit contains tools that enable an LLM agent to interact with a GitLab repository. 
-
-The toolkit is a wrapper for the `python-gitlab` library.
-
-See a [usage example](/docs/integrations/tools/gitlab).
-
-```python
-from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
-```
-
-### GitLabAction
-
-Tool for interacting with the GitLab API.
-
-```python
-from langchain_community.tools.gitlab.tool import GitLabAction
-```
diff --git a/langchain_md_files/integrations/providers/golden.mdx b/langchain_md_files/integrations/providers/golden.mdx
deleted file mode 100644
index 7acde1e460949bd28affc5801c583e4349313585..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/golden.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# Golden
-
->[Golden](https://golden.com) provides a set of natural language APIs for querying and enrichment using the Golden Knowledge Graph e.g. queries such as: `Products from OpenAI`, `Generative ai companies with series a funding`, and `rappers who invest` can be used to retrieve structured data about relevant entities.
->
->The `golden-query` langchain tool is a wrapper on top of the [Golden Query API](https://docs.golden.com/reference/query-api) which enables programmatic access to these results.
->See the [Golden Query API docs](https://docs.golden.com/reference/query-api) for more information.
-
-## Installation and Setup
-- Go to the [Golden API docs](https://docs.golden.com/) to get an overview about the Golden API.
-- Get your API key from the [Golden API Settings](https://golden.com/settings/api) page.
-- Save your API key in the `GOLDEN_API_KEY` environment variable
-
-## Wrappers
-
-### Utility
-
-There exists a GoldenQueryAPIWrapper utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
-```
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/golden_query).
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-```python
-from langchain.agents import load_tools
-tools = load_tools(["golden-query"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/goodfire.mdx b/langchain_md_files/integrations/providers/goodfire.mdx
deleted file mode 100644
index 176c5610a1e1eeb971fef0f82f5ff976836686bb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/goodfire.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
-# Goodfire
-
-[Goodfire](https://www.goodfire.ai/) is a research lab focused on AI safety and
-interpretability.
-
-## Installation and Setup
-
-```bash
-pip install langchain-goodfire
-```
-
-## Chat models
-
-See detail on available chat models [here](/docs/integrations/chat/goodfire).
diff --git a/langchain_md_files/integrations/providers/google.mdx b/langchain_md_files/integrations/providers/google.mdx
deleted file mode 100644
index 179d2726bd773978e21870595421bba316617f72..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/google.mdx
+++ /dev/null
@@ -1,1418 +0,0 @@
-# Google
-
-All functionality related to [Google Cloud Platform](https://cloud.google.com/) and other `Google` products.
-
-Integration packages for Gemini models and the VertexAI platform are maintained in
-the [langchain-google](https://github.com/langchain-ai/langchain-google) repository.
-You can find a host of LangChain integrations with other Google APIs in the
-[googleapis](https://github.com/googleapis?q=langchain-&type=all&language=&sort=)
-Github organization.
-
-## Chat models
-
-We recommend that individual developers start with the Gemini API (`langchain-google-genai`) and move to Vertex AI (`langchain-google-vertexai`) when they need access to commercial support and higher rate limits. If you’re already Cloud-friendly or Cloud-native, you can get started with Vertex AI straight away.
-Please see [here](https://ai.google.dev/gemini-api/docs/migrate-to-cloud) for more information.
-
-### Google Generative AI
-
-Access GoogleAI `Gemini` models such as `gemini-pro` and `gemini-pro-vision` through the `ChatGoogleGenerativeAI` class.
-
-```bash
-pip install -U langchain-google-genai
-```
-
-Configure your API key.
-
-```bash
-export GOOGLE_API_KEY=your-api-key
-```
-
-```python
-from langchain_google_genai import ChatGoogleGenerativeAI
-
-llm = ChatGoogleGenerativeAI(model="gemini-pro")
-llm.invoke("Sing a ballad of LangChain.")
-```
-
-The Gemini vision model supports image inputs when they are provided in a single chat message.
-
-```python
-from langchain_core.messages import HumanMessage
-from langchain_google_genai import ChatGoogleGenerativeAI
-
-llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
-
-message = HumanMessage(
-    content=[
-        {
-            "type": "text",
-            "text": "What's in this image?",
-        },  # You can optionally provide text parts
-        {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
-    ]
-)
-llm.invoke([message])
-```
-
-The value of image_url can be any of the following:
-
-- A public image URL
-- A gcs file (e.g., "gcs://path/to/file.png")
-- A local file path
-- A base64 encoded image (e.g., data:image/png;base64,abcd124)
-- A PIL image
-
-### Vertex AI
-
-Access chat models like `Gemini` via Google Cloud.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
-
-```python
-from langchain_google_vertexai import ChatVertexAI
-```
-
-### Anthropic on Vertex AI Model Garden
-
-See a [usage example](/docs/integrations/llms/google_vertex_ai_palm).
-
-```python
-from langchain_google_vertexai.model_garden import ChatAnthropicVertex
-```
-
-### Llama on Vertex AI Model Garden
-
-```python
-from langchain_google_vertexai.model_garden_maas.llama import VertexModelGardenLlama
-```
-
-### Mistral on Vertex AI Model Garden
-
-```python
-from langchain_google_vertexai.model_garden_maas.mistral import VertexModelGardenMistral
-```
-
-### Gemma local from Hugging Face
-
->Local `Gemma` model loaded from `HuggingFace`.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaChatLocalHF
-```
-
-### Gemma local from Kaggle
-
->Local `Gemma` model loaded from `Kaggle`.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaChatLocalKaggle
-```
-
-### Gemma on Vertex AI Model Garden
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaChatVertexAIModelGarden
-```
-
-### Vertex AI image captioning
-
->Implementation of the `Image Captioning model` as a chat.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.vision_models import VertexAIImageCaptioningChat
-```
-
-### Vertex AI image editor
-
->Given an image and a prompt, edit the image. Currently only supports mask-free editing.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.vision_models import VertexAIImageEditorChat
-```
-
-### Vertex AI image generator
-
->Generates an image from a prompt.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.vision_models import VertexAIImageGeneratorChat
-```
-
-### Vertex AI visual QnA
-
->Chat implementation of a visual QnA model
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.vision_models import VertexAIVisualQnAChat
-```
-
-## LLMs
-
-### Google Generative AI
-
-Access GoogleAI `Gemini` models such as `gemini-pro` and `gemini-pro-vision` through the `GoogleGenerativeAI` class.
-
-Install python package.
-
-```bash
-pip install langchain-google-genai
-```
-
-See a [usage example](/docs/integrations/llms/google_ai).
-
-```python
-from langchain_google_genai import GoogleGenerativeAI
-```
-
-### Vertex AI Model Garden
-
-Access `PaLM` and hundreds of OSS models via `Vertex AI Model Garden` service.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model-garden).
-
-```python
-from langchain_google_vertexai import VertexAIModelGarden
-```
-
-### Gemma local from Hugging Face
-
->Local `Gemma` model loaded from `HuggingFace`.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaLocalHF
-```
-
-### Gemma local from Kaggle
-
->Local `Gemma` model loaded from `Kaggle`.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaLocalKaggle
-```
-
-### Gemma on Vertex AI Model Garden
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.gemma import GemmaVertexAIModelGarden
-```
-
-### Vertex AI image captioning
-
->Implementation of the `Image Captioning model` as an LLM.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.vision_models import VertexAIImageCaptioning
-```
-
-## Embedding models
-
-### Google Generative AI embedding
-
-See a [usage example](/docs/integrations/text_embedding/google_generative_ai).
-
-```bash
-pip install -U langchain-google-genai
-```
-
-Configure your API key.
-
-```bash
-export GOOGLE_API_KEY=your-api-key
-```
-
-```python
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
-```
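-
-A minimal sketch of embedding a query (the `models/embedding-001` model name is an assumption; pick whichever embedding model your account has access to):
-
-```python
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
-
-embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-vector = embeddings.embed_query("hello, world!")
-print(len(vector))
-```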
-
-### Google Generative AI server-side embedding
-
-Install the python package:
-
-```bash
-pip install langchain-google-genai
-```
-
-```python
-from langchain_google_genai.google_vector_store import ServerSideEmbedding
-```
-
-### Vertex AI
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-See a [usage example](/docs/integrations/text_embedding/google_vertex_ai_palm).
-
-```python
-from langchain_google_vertexai import VertexAIEmbeddings
-```
-
-### Palm embedding
-
-We need to install `langchain-community` python package.
-
-```bash
-pip install langchain-community
-```
-
-```python
-from langchain_community.embeddings.google_palm import GooglePalmEmbeddings
-```
-
-## Document Loaders
-
-### AlloyDB for PostgreSQL
-
-> [Google Cloud AlloyDB](https://cloud.google.com/alloydb) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability on Google Cloud. AlloyDB is 100% compatible with PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-alloydb-pg
-```
-
-See [usage example](/docs/integrations/document_loaders/google_alloydb).
-
-```python
-from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBLoader
-```
-
-### BigQuery
-
-> [Google Cloud BigQuery](https://cloud.google.com/bigquery) is a serverless and cost-effective enterprise data warehouse that works across clouds and scales with your data in Google Cloud.
-
-We need to install `langchain-google-community` with Big Query dependencies:
-
-```bash
-pip install langchain-google-community[bigquery]
-```
-
-See a [usage example](/docs/integrations/document_loaders/google_bigquery).
-
-```python
-from langchain_google_community import BigQueryLoader
-```
-
-### Bigtable
-
-> [Google Cloud Bigtable](https://cloud.google.com/bigtable/docs) is Google's fully managed NoSQL Big Data database service in Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-bigtable
-```
-
-See a [usage example](/docs/integrations/document_loaders/google_bigtable).
-
-```python
-from langchain_google_bigtable import BigtableLoader
-```
-
-### Cloud SQL for MySQL
-
-> [Google Cloud SQL for MySQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your MySQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-mysql
-```
-
-See [usage example](/docs/integrations/document_loaders/google_cloud_sql_mysql).
-
-```python
-from langchain_google_cloud_sql_mysql import MySQLEngine, MySQLLoader
-```
-
-### Cloud SQL for SQL Server
-
-> [Google Cloud SQL for SQL Server](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your SQL Server databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-mssql
-```
-
-See [usage example](/docs/integrations/document_loaders/google_cloud_sql_mssql).
-
-```python
-from langchain_google_cloud_sql_mssql import MSSQLEngine, MSSQLLoader
-```
-
-### Cloud SQL for PostgreSQL
-
-> [Google Cloud SQL for PostgreSQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-pg
-```
-
-See [usage example](/docs/integrations/document_loaders/google_cloud_sql_pg).
-
-```python
-from langchain_google_cloud_sql_pg import PostgresEngine, PostgresLoader
-```
-
-### Cloud Storage
-
->[Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data in Google Cloud.
-
-We need to install `langchain-google-community` with Google Cloud Storage dependencies.
-
-```bash
-pip install langchain-google-community[gcs]
-```
-
-There are two loaders for the `Google Cloud Storage`: the `Directory` and the `File` loaders.
-
-See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_directory).
-
-```python
-from langchain_google_community import GCSDirectoryLoader
-```
-See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_file).
-
-```python
-from langchain_google_community import GCSFileLoader
-```
-
-### Cloud Vision loader
-
-Install the python package:
-
-```bash
-pip install langchain-google-community[vision]
-```
-
-```python
-from langchain_google_community.vision import CloudVisionLoader
-```
-
-### El Carro for Oracle Workloads
-
-> Google [El Carro Oracle Operator](https://github.com/GoogleCloudPlatform/elcarro-oracle-operator)
-offers a way to run Oracle databases in Kubernetes as a portable, open source,
-community driven, no vendor lock-in container orchestration system.
-
-```bash
-pip install langchain-google-el-carro
-```
-
-See [usage example](/docs/integrations/document_loaders/google_el_carro).
-
-```python
-from langchain_google_el_carro import ElCarroLoader
-```
-
-### Google Drive
-
->[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.
-
-Currently, only `Google Docs` are supported.
-
-We need to install `langchain-google-community` with Google Drive dependencies.
-
-```bash
-pip install langchain-google-community[drive]
-```
-
-See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_drive).
-
-```python
-from langchain_google_community import GoogleDriveLoader
-```
-
-### Firestore (Native Mode)
-
-> [Google Cloud Firestore](https://cloud.google.com/firestore/docs/) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
-
-Install the python package:
-
-```bash
-pip install langchain-google-firestore
-```
-
-See [usage example](/docs/integrations/document_loaders/google_firestore).
-
-```python
-from langchain_google_firestore import FirestoreLoader
-```
-
-### Firestore (Datastore Mode)
-
-> [Google Cloud Firestore in Datastore mode](https://cloud.google.com/datastore/docs) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
-> Firestore is the newest version of Datastore and introduces several improvements over Datastore.
-
-Install the python package:
-
-```bash
-pip install langchain-google-datastore
-```
-
-See [usage example](/docs/integrations/document_loaders/google_datastore).
-
-```python
-from langchain_google_datastore import DatastoreLoader
-```
-
-### Memorystore for Redis
-
-> [Google Cloud Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis) is a fully managed Redis service for Google Cloud. Applications running on Google Cloud can achieve extreme performance by leveraging the highly scalable, available, secure Redis service without the burden of managing complex Redis deployments.
-
-Install the python package:
-
-```bash
-pip install langchain-google-memorystore-redis
-```
-
-See [usage example](/docs/integrations/document_loaders/google_memorystore_redis).
-
-```python
-from langchain_google_memorystore_redis import MemorystoreDocumentLoader
-```
-
-### Spanner
-
-> [Google Cloud Spanner](https://cloud.google.com/spanner/docs) is a fully managed, mission-critical, relational database service on Google Cloud that offers transactional consistency at global scale, automatic, synchronous replication for high availability, and support for two SQL dialects: GoogleSQL (ANSI 2011 with extensions) and PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-spanner
-```
-
-See [usage example](/docs/integrations/document_loaders/google_spanner).
-
-```python
-from langchain_google_spanner import SpannerLoader
-```
-
-### Speech-to-Text
-
-> [Google Cloud Speech-to-Text](https://cloud.google.com/speech-to-text) is an audio transcription API powered by Google's speech recognition models in Google Cloud.
-
-This document loader transcribes audio files and outputs the text results as Documents.
-
-First, we need to install `langchain-google-community` with speech-to-text dependencies.
-
-```bash
-pip install langchain-google-community[speech]
-```
-
-See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_speech_to_text).
-
-```python
-from langchain_google_community import SpeechToTextLoader
-```
-
-## Document Transformers
-
-### Document AI
-
->[Google Cloud Document AI](https://cloud.google.com/document-ai/docs/overview) is a Google Cloud
-> service that transforms unstructured data from documents into structured data, making it easier
-> to understand, analyze, and consume.
-
-We need to set up a [`GCS` bucket and create your own OCR processor](https://cloud.google.com/document-ai/docs/create-processor).
-The `GCS_OUTPUT_PATH` should be a path to a folder on GCS (starting with `gs://`),
-and the processor name should look like `projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID`.
-We can get it either programmatically or copy it from the `Prediction endpoint` section of the `Processor details`
-tab in the Google Cloud Console.
-
-```bash
-pip install langchain-google-community[docai]
-```
-
-See a [usage example](/docs/integrations/document_transformers/google_docai).
-
-```python
-from langchain_core.document_loaders.blob_loaders import Blob
-from langchain_google_community import DocAIParser
-```
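-
-A minimal sketch of parsing a PDF stored on GCS (the processor name, bucket paths, and `location` value are placeholders; `lazy_parse` is assumed to accept a `Blob` pointing at the source file):
-
-```python
-from langchain_core.document_loaders.blob_loaders import Blob
-from langchain_google_community import DocAIParser
-
-parser = DocAIParser(
-    location="us",
-    processor_name="projects/PROJECT_NUMBER/locations/us/processors/PROCESSOR_ID",
-    gcs_output_path="gs://BUCKET_NAME/FOLDER_PATH",
-)
-
-# Parse one document; results are written to the GCS output path and returned as Documents.
-blob = Blob(path="gs://BUCKET_NAME/FOLDER_PATH/document.pdf")
-docs = list(parser.lazy_parse(blob))
-```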
-
-### Google Translate
-
-> [Google Translate](https://translate.google.com/) is a multilingual neural machine
-> translation service developed by Google to translate text, documents and websites
-> from one language into another.
-
-The `GoogleTranslateTransformer` allows you to translate text and HTML with the [Google Cloud Translation API](https://cloud.google.com/translate).
-
-First, we need to install the `langchain-google-community` with translate dependencies.
-
-```bash
-pip install langchain-google-community[translate]
-```
-
-See a [usage example and authorization instructions](/docs/integrations/document_transformers/google_translate).
-
-```python
-from langchain_google_community import GoogleTranslateTransformer
-```
-
-## Vector Stores
-
-### AlloyDB for PostgreSQL
-
-> [Google Cloud AlloyDB](https://cloud.google.com/alloydb) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability on Google Cloud. AlloyDB is 100% compatible with PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-alloydb-pg
-```
-
-See [usage example](/docs/integrations/vectorstores/google_alloydb).
-
-```python
-from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore
-```
-
-### BigQuery Vector Search
-
-> [Google Cloud BigQuery](https://cloud.google.com/bigquery),
-> BigQuery is a serverless and cost-effective enterprise data warehouse in Google Cloud.
->
-> [Google Cloud BigQuery Vector Search](https://cloud.google.com/bigquery/docs/vector-search-intro)
-> BigQuery vector search lets you use GoogleSQL to do semantic search, using vector indexes for fast but approximate results, or using brute force for exact results.
-
-> It can calculate Euclidean or Cosine distance. With LangChain, we default to Euclidean distance.
-
-We need to install the `google-cloud-bigquery` python package.
-
-```bash
-pip install google-cloud-bigquery
-```
-
-See a [usage example](/docs/integrations/vectorstores/google_bigquery_vector_search).
-
-```python
-from langchain.vectorstores import BigQueryVectorSearch
-```
-
-### Memorystore for Redis
-
-> [Google Cloud Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis) is a fully managed Redis service for Google Cloud. Applications running on Google Cloud can achieve extreme performance by leveraging the highly scalable, available, secure Redis service without the burden of managing complex Redis deployments.
-
-Install the python package:
-
-```bash
-pip install langchain-google-memorystore-redis
-```
-
-See [usage example](/docs/integrations/vectorstores/google_memorystore_redis).
-
-```python
-from langchain_google_memorystore_redis import RedisVectorStore
-```
-
-### Spanner
-
-> [Google Cloud Spanner](https://cloud.google.com/spanner/docs) is a fully managed, mission-critical, relational database service on Google Cloud that offers transactional consistency at global scale, automatic, synchronous replication for high availability, and support for two SQL dialects: GoogleSQL (ANSI 2011 with extensions) and PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-spanner
-```
-
-See [usage example](/docs/integrations/vectorstores/google_spanner).
-
-```python
-from langchain_google_spanner import SpannerVectorStore
-```
-
-### Firestore (Native Mode)
-
-> [Google Cloud Firestore](https://cloud.google.com/firestore/docs/) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
-
-Install the python package:
-
-```bash
-pip install langchain-google-firestore
-```
-
-See [usage example](/docs/integrations/vectorstores/google_firestore).
-
-```python
-from langchain_google_firestore import FirestoreVectorStore
-```
-
-### Cloud SQL for MySQL
-
-> [Google Cloud SQL for MySQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your MySQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-mysql
-```
-
-See [usage example](/docs/integrations/vectorstores/google_cloud_sql_mysql).
-
-```python
-from langchain_google_cloud_sql_mysql import MySQLEngine, MySQLVectorStore
-```
-
-### Cloud SQL for PostgreSQL
-
-> [Google Cloud SQL for PostgreSQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-pg
-```
-
-See [usage example](/docs/integrations/vectorstores/google_cloud_sql_pg).
-
-```python
-from langchain_google_cloud_sql_pg import PostgresEngine, PostgresVectorStore
-```
-
-### Vertex AI Vector Search
-
-> [Google Cloud Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) from Google Cloud,
-> formerly known as `Vertex AI Matching Engine`, provides the industry's leading high-scale
-> low latency vector database. These vector databases are commonly
-> referred to as vector similarity-matching or an approximate nearest neighbor (ANN) service.
-
-Install the python package:
-
-```bash
-pip install langchain-google-vertexai
-```
-
-See a [usage example](/docs/integrations/vectorstores/google_vertex_ai_vector_search).
-
-```python
-from langchain_google_vertexai import VectorSearchVectorStore
-```
-
-### Vertex AI Vector Search with DataStore
-
-> Vector Search with Datastore document storage.
-
-Install the python package:
-
-```bash
-pip install langchain-google-vertexai
-```
-
-See a [usage example](/docs/integrations/vectorstores/google_vertex_ai_vector_search/#optional--you-can-also-create-vectore-and-store-chunks-in-a-datastore).
-
-```python
-from langchain_google_vertexai import VectorSearchVectorStoreDatastore
-```
-
-### VectorSearchVectorStoreGCS 
-
-> Alias of `VectorSearchVectorStore`, provided for consistency
-> with the other vector stores that use different document storage backends.
-
-Install the python package:
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai import VectorSearchVectorStoreGCS
-```
-
-### Google Generative AI Vector Store 
-
-> Currently, it computes the embedding vectors on the server side.
-> For more information visit [Guide](https://developers.generativeai.google/guide).
-
-Install the python package:
-
-```bash
-pip install langchain-google-genai
-```
-
-```python
-from langchain_google_genai.google_vector_store import GoogleVectorStore
-```
-
-### ScaNN
-
->[Google ScaNN](https://github.com/google-research/google-research/tree/master/scann)
-> (Scalable Nearest Neighbors) is a python package.
->
->`ScaNN` is a method for efficient vector similarity search at scale.
-
->`ScaNN` includes search space pruning and quantization for Maximum Inner
-> Product Search and also supports other distance functions such as
-> Euclidean distance. The implementation is optimized for x86 processors
-> with AVX2 support. See its [Google Research github](https://github.com/google-research/google-research/tree/master/scann)
-> for more details.
-
-We need to install `scann` python package.
-
-```bash
-pip install scann
-```
-
-See a [usage example](/docs/integrations/vectorstores/scann).
-
-```python
-from langchain_community.vectorstores import ScaNN
-```
-
-## Retrievers
-
-### Google Drive
-
-We need to install several python packages.
-
-```bash
-pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib langchain-googledrive
-```
-
-See a [usage example and authorization instructions](/docs/integrations/retrievers/google_drive).
-
-```python
-from langchain_googledrive.retrievers import GoogleDriveRetriever
-```
-
-### Vertex AI Search
-
-> [Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/introduction)
-> from Google Cloud allows developers to quickly build generative AI powered search engines for customers and employees.
-
-See a [usage example](/docs/integrations/retrievers/google_vertex_ai_search).
-
-Note: `GoogleVertexAISearchRetriever` is deprecated, use `VertexAIMultiTurnSearchRetriever`,
-`VertexAISearchSummaryTool`, and `VertexAISearchRetriever` (see below).
-
-#### GoogleVertexAISearchRetriever
-
-We need to install the `google-cloud-discoveryengine` python package.
-
-```bash
-pip install google-cloud-discoveryengine
-```
-
-```python
-from langchain_community.retrievers import GoogleVertexAISearchRetriever
-```
-
-#### VertexAIMultiTurnSearchRetriever
-
-```python
-from langchain_google_community import VertexAIMultiTurnSearchRetriever
-```
-
-#### VertexAISearchRetriever
-
-```python
-from langchain_google_community import VertexAISearchRetriever
-```
-
-#### VertexAISearchSummaryTool
-
-```python
-from langchain_google_community import VertexAISearchSummaryTool
-```
-
-### Document AI Warehouse
-
-> [Document AI Warehouse](https://cloud.google.com/document-ai-warehouse)
-> from Google Cloud allows enterprises to search, store, govern, and manage documents and their AI-extracted
-> data and metadata in a single platform.
-
-Note: `GoogleDocumentAIWarehouseRetriever` is deprecated, use `DocumentAIWarehouseRetriever` (see below).
-```python
-from langchain.retrievers import GoogleDocumentAIWarehouseRetriever
-docai_wh_retriever = GoogleDocumentAIWarehouseRetriever(
-    project_number=...
-)
-query = ...
-documents = docai_wh_retriever.invoke(
-    query, user_ldap=...
-)
-```
-
-```python
-from langchain_google_community.documentai_warehouse import DocumentAIWarehouseRetriever
-```
-
-## Tools
-
-### Text-to-Speech
-
->[Google Cloud Text-to-Speech](https://cloud.google.com/text-to-speech) is a Google Cloud service that enables developers to
-> synthesize natural-sounding speech with 100+ voices, available in multiple languages and variants.
-> It applies DeepMind’s groundbreaking research in WaveNet and Google’s powerful neural networks
-> to deliver the highest fidelity possible.
-
-We need to install python packages.
-
-```bash
-pip install google-cloud-text-to-speech langchain-google-community
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_cloud_texttospeech).
-
-```python
-from langchain_google_community import TextToSpeechTool
-```
-
-### Google Drive
-
-We need to install several python packages.
-
-```bash
-pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
-pip install langchain-googledrive
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_drive).
-
-```python
-from langchain_googledrive.utilities.google_drive import GoogleDriveAPIWrapper
-from langchain_googledrive.tools.google_drive.tool import GoogleDriveSearchTool
-```
-
-### Google Finance
-
-We need to install a python package.
-
-```bash
-pip install google-search-results
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_finance).
-
-```python
-from langchain_community.tools.google_finance import GoogleFinanceQueryRun
-from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
-```
-
-### Google Jobs
-
-We need to install a python package.
-
-```bash
-pip install google-search-results
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_jobs).
-
-```python
-from langchain_community.tools.google_jobs import GoogleJobsQueryRun
-from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
-```
-
-### Google Lens
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_lens).
-
-```python
-from langchain_community.tools.google_lens import GoogleLensQueryRun
-from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
-```
-
-### Google Places
-
-We need to install a python package.
-
-```bash
-pip install googlemaps
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_places).
-
-```python
-from langchain.tools import GooglePlacesTool
-```
-
-### Google Scholar
-
-We need to install a python package.
-
-```bash
-pip install google-search-results
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_scholar).
-
-```python
-from langchain_community.tools.google_scholar import GoogleScholarQueryRun
-from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
-```
-
-### Google Search
-
-- Set up a Custom Search Engine, following [these instructions](https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search)
-- Get an API Key and Custom Search Engine ID from the previous step, and set them as environment variables
-`GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively.
-
-```python
-from langchain_google_community import GoogleSearchAPIWrapper
-```
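-
-A minimal sketch of running a search directly through the wrapper (assumes `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` are already set as described above; the query is illustrative):
-
-```python
-from langchain_google_community import GoogleSearchAPIWrapper
-
-search = GoogleSearchAPIWrapper()
-print(search.run("What is LangChain?"))
-```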
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search).
-
-We can easily load this wrapper as a Tool (to use with an Agent). We can do this with:
-
-```python
-from langchain.agents import load_tools
-tools = load_tools(["google-search"])
-```
-
-#### GoogleSearchResults
-
-Tool that queries the `Google Search` API (via `GoogleSearchAPIWrapper`) and gets back JSON.
-
-```python
-from langchain_community.tools import GoogleSearchResults
-```
-
-#### GoogleSearchRun
-
-Tool that queries the `Google Search` API (via `GoogleSearchAPIWrapper`).
-
-```python
-from langchain_community.tools import GoogleSearchRun
-```
-
-
-### Google Trends
-
-We need to install a python package.
-
-```bash
-pip install google-search-results
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_trends).
-
-```python
-from langchain_community.tools.google_trends import GoogleTrendsQueryRun
-from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
-```
-
-## Toolkits
-
-### GMail
-
-> [Google Gmail](https://en.wikipedia.org/wiki/Gmail) is a free email service provided by Google.
-This toolkit works with emails through the `Gmail API`.
-
-We need to install `langchain-google-community` with required dependencies:
-
-```bash
-pip install langchain-google-community[gmail]
-```
-
-See a [usage example and authorization instructions](/docs/integrations/tools/gmail).
-
-```python
-from langchain_google_community import GmailToolkit
-```
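-
-A minimal sketch of pulling the individual tools out of the toolkit (assumes Gmail API credentials have been configured as described in the linked instructions):
-
-```python
-from langchain_google_community import GmailToolkit
-
-toolkit = GmailToolkit()
-tools = toolkit.get_tools()
-print([tool.name for tool in tools])
-```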
-
-#### GMail individual tools
-
-You can use individual tools from GMail Toolkit.
-
-```python
-from langchain_google_community.gmail.create_draft import GmailCreateDraft
-from langchain_google_community.gmail.get_message import GmailGetMessage
-from langchain_google_community.gmail.get_thread import GmailGetThread
-from langchain_google_community.gmail.search import GmailSearch
-from langchain_google_community.gmail.send_message import GmailSendMessage
-```
-
-## Memory
-
-### AlloyDB for PostgreSQL
-
-> [AlloyDB for PostgreSQL](https://cloud.google.com/alloydb) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability on Google Cloud. AlloyDB is 100% compatible with PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-alloydb-pg
-```
-
-See [usage example](/docs/integrations/memory/google_alloydb).
-
-```python
-from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBChatMessageHistory
-```
-
-### Cloud SQL for PostgreSQL
-
-> [Cloud SQL for PostgreSQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-pg
-```
-
-See [usage example](/docs/integrations/memory/google_sql_pg).
-
-
-```python
-from langchain_google_cloud_sql_pg import PostgresEngine, PostgresChatMessageHistory
-```
-
-### Cloud SQL for MySQL
-
-> [Cloud SQL for MySQL](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your MySQL relational databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-mysql
-```
-
-See [usage example](/docs/integrations/memory/google_sql_mysql).
-
-```python
-from langchain_google_cloud_sql_mysql import MySQLEngine, MySQLChatMessageHistory
-```
-
-### Cloud SQL for SQL Server
-
-> [Cloud SQL for SQL Server](https://cloud.google.com/sql) is a fully-managed database service that helps you set up, maintain, manage, and administer your SQL Server databases on Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-cloud-sql-mssql
-```
-
-See [usage example](/docs/integrations/memory/google_sql_mssql).
-
-```python
-from langchain_google_cloud_sql_mssql import MSSQLEngine, MSSQLChatMessageHistory
-```
-
-### Spanner
-
-> [Google Cloud Spanner](https://cloud.google.com/spanner/docs) is a fully managed, mission-critical, relational database service on Google Cloud that offers transactional consistency at global scale, automatic, synchronous replication for high availability, and support for two SQL dialects: GoogleSQL (ANSI 2011 with extensions) and PostgreSQL.
-
-Install the python package:
-
-```bash
-pip install langchain-google-spanner
-```
-
-See [usage example](/docs/integrations/memory/google_spanner).
-
-```python
-from langchain_google_spanner import SpannerChatMessageHistory
-```
-
-### Memorystore for Redis
-
-> [Google Cloud Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis) is a fully managed Redis service for Google Cloud. Applications running on Google Cloud can achieve extreme performance by leveraging the highly scalable, available, secure Redis service without the burden of managing complex Redis deployments.
-
-Install the python package:
-
-```bash
-pip install langchain-google-memorystore-redis
-```
-
-See [usage example](/docs/integrations/memory/google_memorystore_redis).
-
-```python
-from langchain_google_memorystore_redis import MemorystoreChatMessageHistory
-```
-
-### Bigtable
-
-> [Google Cloud Bigtable](https://cloud.google.com/bigtable/docs) is Google's fully managed NoSQL Big Data database service in Google Cloud.
-
-Install the python package:
-
-```bash
-pip install langchain-google-bigtable
-```
-
-See [usage example](/docs/integrations/memory/google_bigtable).
-
-```python
-from langchain_google_bigtable import BigtableChatMessageHistory
-```
-
-### Firestore (Native Mode)
-
-> [Google Cloud Firestore](https://cloud.google.com/firestore/docs/) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
-
-Install the python package:
-
-```bash
-pip install langchain-google-firestore
-```
-
-See [usage example](/docs/integrations/memory/google_firestore).
-
-```python
-from langchain_google_firestore import FirestoreChatMessageHistory
-```
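-
-A minimal sketch of storing and reading chat history (assumes Google Cloud authentication for Firestore is already configured; the `session_id` and `collection` values are placeholders):
-
-```python
-from langchain_google_firestore import FirestoreChatMessageHistory
-
-history = FirestoreChatMessageHistory(session_id="user-session", collection="HistoryMessages")
-history.add_user_message("Hi!")
-history.add_ai_message("Hello, how can I help you?")
-print(history.messages)
-```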
-
-### Firestore (Datastore Mode)
-
-> [Google Cloud Firestore in Datastore mode](https://cloud.google.com/datastore/docs) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
-> Firestore is the newest version of Datastore and introduces several improvements over Datastore.
-
-Install the python package:
-
-```bash
-pip install langchain-google-datastore
-```
-
-See [usage example](/docs/integrations/memory/google_firestore_datastore).
-
-```python
-from langchain_google_datastore import DatastoreChatMessageHistory
-```
-
-### El Carro: The Oracle Operator for Kubernetes
-
-> Google's [El Carro Oracle Operator for Kubernetes](https://github.com/GoogleCloudPlatform/elcarro-oracle-operator)
-> offers a way to run `Oracle` databases in `Kubernetes` as a portable, open source,
-> community-driven container orchestration system with no vendor lock-in.
-
-Install the python package:
-
-```bash
-pip install langchain-google-el-carro
-```
-
-See [usage example](/docs/integrations/memory/google_el_carro).
-
-```python
-from langchain_google_el_carro import ElCarroChatMessageHistory
-```
-
-## Callbacks
-
-### Vertex AI callback handler
-
->Callback Handler that tracks `VertexAI` info.
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-```python
-from langchain_google_vertexai.callbacks import VertexAICallbackHandler
-```
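-
-A minimal sketch of attaching the handler to a model call (the model name is an assumption; inspect the handler for the usage counters available in your installed version):
-
-```python
-from langchain_google_vertexai import ChatVertexAI
-from langchain_google_vertexai.callbacks import VertexAICallbackHandler
-
-handler = VertexAICallbackHandler()
-llm = ChatVertexAI(model_name="gemini-1.5-flash")  # hypothetical model name
-
-llm.invoke("Tell me a joke", config={"callbacks": [handler]})
-print(vars(handler))  # accumulated usage counters
-```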
-
-## Chat Loaders
-
-### GMail
-
-> [Gmail](https://en.wikipedia.org/wiki/Gmail) is a free email service provided by Google.
-> This loader works with emails through the `Gmail API`.
-
-We need to install `langchain-google-community` with underlying dependencies.
-
-```bash
-pip install langchain-google-community[gmail]
-```
-
-See a [usage example and authorization instructions](/docs/integrations/chat_loaders/gmail).
-
-```python
-from langchain_google_community import GMailLoader
-```
-
-## Evaluators
-
-We need to install `langchain-google-vertexai` python package.
-
-```bash
-pip install langchain-google-vertexai
-```
-
-### VertexPairWiseStringEvaluator
-
->Pair-wise evaluation of the perplexity of a predicted string.
-
-```python
-from langchain_google_vertexai.evaluators.evaluation import VertexPairWiseStringEvaluator
-```
-
-### VertexStringEvaluator
-
->Evaluate the perplexity of a predicted string.
-
-```python
-from langchain_google_vertexai.evaluators.evaluation import VertexStringEvaluator
-```
-
-## 3rd Party Integrations
-
-### SearchApi
-
->[SearchApi](https://www.searchapi.io/) provides a 3rd-party API to access Google search results, YouTube search & transcripts, and other Google-related engines.
-
-See [usage examples and authorization instructions](/docs/integrations/tools/searchapi).
-
-```python
-from langchain_community.utilities import SearchApiAPIWrapper
-```
-
-### SerpApi
-
->[SerpApi](https://serpapi.com/) provides a 3rd-party API to access Google search results.
-
-See a [usage example and authorization instructions](/docs/integrations/tools/serpapi).
-
-```python
-from langchain_community.utilities import SerpAPIWrapper
-```
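-
-A minimal sketch (assumes the `google-search-results` package is installed and a SerpApi key is available):
-
-```python
-import os
-
-from langchain_community.utilities import SerpAPIWrapper
-
-os.environ["SERPAPI_API_KEY"] = "<your-serpapi-key>"  # assumption: replace with your key
-
-search = SerpAPIWrapper()
-print(search.run("What is the capital of France?"))
-```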
-
-### Serper.dev
-
-See a [usage example and authorization instructions](/docs/integrations/tools/google_serper).
-
-```python
-from langchain_community.utilities import GoogleSerperAPIWrapper
-```
-
-### YouTube
-
->[YouTube Search](https://github.com/joetats/youtube_search) package searches `YouTube` videos, avoiding the use of their heavily rate-limited API.
->
->It uses the form on the YouTube homepage and scrapes the resulting page.
-
-We need to install the `youtube_search` python package.
-
-```bash
-pip install youtube_search
-```
-
-See a [usage example](/docs/integrations/tools/youtube).
-
-```python
-from langchain.tools import YouTubeSearchTool
-```
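-
-A minimal sketch; per the linked example, you can append `,<n>` to the query to limit the number of returned links:
-
-```python
-from langchain.tools import YouTubeSearchTool
-
-tool = YouTubeSearchTool()
-print(tool.run("lex fridman,5"))  # returns a string with up to 5 video links
-```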
-
-### YouTube audio
-
->[YouTube](https://www.youtube.com/) is an online video sharing and social media platform created by `Google`.
-
-Use `YoutubeAudioLoader` to fetch / download the audio files.
-
-Then, use `OpenAIWhisperParser` to transcribe them to text.
-
-We need to install several python packages.
-
-```bash
-pip install yt_dlp pydub librosa
-```
-
-See a [usage example and authorization instructions](/docs/integrations/document_loaders/youtube_audio).
-
-```python
-from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
-from langchain_community.document_loaders.parsers import OpenAIWhisperParser, OpenAIWhisperParserLocal
-```
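-
-A minimal sketch of the audio-to-text pipeline (the URL and save directory are placeholders; `OpenAIWhisperParser` assumes an OpenAI API key):
-
-```python
-from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
-from langchain_community.document_loaders.generic import GenericLoader
-from langchain_community.document_loaders.parsers import OpenAIWhisperParser
-
-urls = ["https://youtu.be/<video-id>"]  # placeholder video URL
-save_dir = "./youtube_audio"
-
-loader = GenericLoader(YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParser())
-docs = loader.load()
-print(docs[0].page_content[:200])
-```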
-
-### YouTube transcripts
-
->[YouTube](https://www.youtube.com/) is an online video sharing and social media platform created by `Google`.
-
-We need to install `youtube-transcript-api` python package.
-
-```bash
-pip install youtube-transcript-api
-```
-
-See a [usage example](/docs/integrations/document_loaders/youtube_transcript).
-
-```python
-from langchain_community.document_loaders import YoutubeLoader
-```
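-
-A minimal sketch (the video URL is a placeholder):
-
-```python
-from langchain_community.document_loaders import YoutubeLoader
-
-loader = YoutubeLoader.from_youtube_url(
-    "https://www.youtube.com/watch?v=<video-id>", add_video_info=False
-)
-docs = loader.load()
-print(docs[0].page_content[:200])
-```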
diff --git a/langchain_md_files/integrations/providers/google_serper.mdx b/langchain_md_files/integrations/providers/google_serper.mdx
deleted file mode 100644
index 0401e66b53581edbfe313c50ae73d88a5c1efd4d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/google_serper.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
-# Serper - Google Search API
-
-This page covers how to use the [Serper](https://serper.dev) Google Search API within LangChain. Serper is a low-cost Google Search API that can be used to add answer box, knowledge graph, and organic results data from Google Search. 
-It is broken into two parts: setup, and then references to the specific Google Serper wrapper.
-
-## Setup
-
-- Go to [serper.dev](https://serper.dev) to sign up for a free account
-- Get the api key and set it as an environment variable (`SERPER_API_KEY`)
-
-## Wrappers
-
-### Utility
-
-There exists a GoogleSerperAPIWrapper utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities import GoogleSerperAPIWrapper
-```
-
-You can use it as part of a Self Ask chain:
-
-```python
-from langchain_community.utilities import GoogleSerperAPIWrapper
-from langchain_openai import OpenAI
-from langchain.agents import initialize_agent, Tool
-from langchain.agents import AgentType
-
-import os
-
-os.environ["SERPER_API_KEY"] = ""
-os.environ['OPENAI_API_KEY'] = ""
-
-llm = OpenAI(temperature=0)
-search = GoogleSerperAPIWrapper()
-tools = [
-    Tool(
-        name="Intermediate Answer",
-        func=search.run,
-        description="useful for when you need to ask with search"
-    )
-]
-
-self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)
-self_ask_with_search.run("What is the hometown of the reigning men's U.S. Open champion?")
-```
-
-#### Output
-```
-Entering new AgentExecutor chain...
- Yes.
-Follow up: Who is the reigning men's U.S. Open champion?
-Intermediate answer: Current champions Carlos Alcaraz, 2022 men's singles champion.
-Follow up: Where is Carlos Alcaraz from?
-Intermediate answer: El Palmar, Spain
-So the final answer is: El Palmar, Spain
-
-> Finished chain.
-
-'El Palmar, Spain'
-```
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_serper).
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-```python
-from langchain.agents import load_tools
-tools = load_tools(["google-serper"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/gooseai.mdx b/langchain_md_files/integrations/providers/gooseai.mdx
deleted file mode 100644
index f0bdf819d2ef7d34a55ad0284789e45afa31e459..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gooseai.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
-# GooseAI
-
->[GooseAI](https://goose.ai) makes deploying NLP services easier and more accessible. 
-> `GooseAI` is a fully managed inference service delivered via API. 
-> With feature parity to other well known APIs, `GooseAI` delivers a plug-and-play solution 
-> for serving open source language models at the industry's best economics by simply 
-> changing 2 lines in your code.
-
-## Installation and Setup
-
-- Install the Python SDK with `pip install openai`
-- Get your GooseAI api key from this link [here](https://goose.ai/).
-- Set the environment variable (`GOOSEAI_API_KEY`).
-
-```python
-import os
-os.environ["GOOSEAI_API_KEY"] = "YOUR_API_KEY"
-```
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/gooseai).
-
-```python
-from langchain_community.llms import GooseAI
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/gpt4all.mdx b/langchain_md_files/integrations/providers/gpt4all.mdx
deleted file mode 100644
index 9e3b188328e02178afb0affd9bb9d3ee8e21cdcd..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gpt4all.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
-# GPT4All
-
-This page covers how to use the `GPT4All` wrapper within LangChain. The tutorial is divided into two parts: installation and setup, followed by usage with an example.
-
-## Installation and Setup
-
-- Install the Python package with `pip install gpt4all`
-- Download a [GPT4All model](https://gpt4all.io/index.html) and place it in your desired directory
-
-In this example, we are using `mistral-7b-openorca.Q4_0.gguf`:
-
-```bash
-mkdir models
-wget https://gpt4all.io/models/gguf/mistral-7b-openorca.Q4_0.gguf -O models/mistral-7b-openorca.Q4_0.gguf
-```
-
-## Usage
-
-### GPT4All
-
-To use the GPT4All wrapper, you need to provide the path to the pre-trained model file and the model's configuration.
-
-```python
-from langchain_community.llms import GPT4All
-
-# Instantiate the model. Callbacks support token-wise streaming
-model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
-
-# Generate text
-response = model.invoke("Once upon a time, ")
-```
-
-You can also customize the generation parameters, such as `n_predict`, `temp`, `top_p`, `top_k`, and others.
-
-To stream the model's predictions, add in a CallbackManager.
-
-```python
-from langchain_community.llms import GPT4All
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-
-# There are many CallbackHandlers supported, such as
-# from langchain.callbacks.streamlit import StreamlitCallbackHandler
-
-callbacks = [StreamingStdOutCallbackHandler()]
-model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
-
-# Generate text. Tokens are streamed through the callback manager.
-model.invoke("Once upon a time, ", callbacks=callbacks)
-```
-
-## Model File
-
-You can download model files from the GPT4All client. You can download the client from the [GPT4All](https://gpt4all.io/index.html) website.
-
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/gpt4all)
diff --git a/langchain_md_files/integrations/providers/gradient.mdx b/langchain_md_files/integrations/providers/gradient.mdx
deleted file mode 100644
index 37cd04e91ec5693f9af000f7e689480bb0eec016..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gradient.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
-# Gradient
-
->[Gradient](https://gradient.ai/) allows you to fine-tune and get completions on LLMs with a simple web API.
-
-## Installation and Setup
-- Install the Python SDK:
-```bash
-pip install gradientai
-```
-Get a [Gradient access token and workspace](https://gradient.ai/) and set them as environment variables (`GRADIENT_ACCESS_TOKEN` and `GRADIENT_WORKSPACE_ID`).
-
-## LLM
-
-There exists a Gradient LLM wrapper, which you can access with the import below.
-See a [usage example](/docs/integrations/llms/gradient).
-
-```python
-from langchain_community.llms import GradientLLM
-```
-
-## Text Embedding Model
-
-There exists a Gradient Embeddings model, which you can access with:
-```python
-from langchain_community.embeddings import GradientEmbeddings
-```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/gradient)
diff --git a/langchain_md_files/integrations/providers/graph_rag.mdx b/langchain_md_files/integrations/providers/graph_rag.mdx
deleted file mode 100644
index c2561ef0c7fd19ccd1a34991d7ad6adf61ffbbe9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/graph_rag.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Graph RAG
-
-## Overview
-
-[Graph RAG](https://datastax.github.io/graph-rag/) provides a retriever interface
-that combines **unstructured** similarity search on vectors with **structured**
-traversal of metadata properties. This enables graph-based retrieval over **existing**
-vector stores.
-
-## Installation and setup
-
-```bash
-pip install langchain-graph-retriever
-```
-
-## Retrievers
-
-```python
-from langchain_graph_retriever import GraphRetriever
-```
-
-For more information, see the [Graph RAG Integration Guide](/docs/integrations/retrievers/graph_rag).
diff --git a/langchain_md_files/integrations/providers/graphsignal.mdx b/langchain_md_files/integrations/providers/graphsignal.mdx
deleted file mode 100644
index 6e4867d35794baa722a5c7970916b5987ac8d97b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/graphsignal.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
-# Graphsignal
-
-This page covers how to use [Graphsignal](https://app.graphsignal.com) to trace and monitor LangChain. Graphsignal enables full visibility into your application. It provides latency breakdowns by chains and tools, exceptions with full context, data monitoring, compute/GPU utilization, OpenAI cost analytics, and more.
-
-## Installation and Setup
-
-- Install the Python library with `pip install graphsignal`
-- Create free Graphsignal account [here](https://graphsignal.com)
-- Get an API key and set it as an environment variable (`GRAPHSIGNAL_API_KEY`)
-
-## Tracing and Monitoring
-
-Graphsignal automatically instruments and starts tracing and monitoring chains. Traces and metrics are then available in your [Graphsignal dashboards](https://app.graphsignal.com).
-
-Initialize the tracer by providing a deployment name:
-
-```python
-import graphsignal
-
-graphsignal.configure(deployment='my-langchain-app-prod')
-```
-
-To additionally trace any function or code, you can use a decorator or a context manager:
-
-```python
-@graphsignal.trace_function
-def handle_request():    
-    chain.run("some initial text")
-```
-
-```python
-with graphsignal.start_trace('my-chain'):
-    chain.run("some initial text")
-```
-
-Optionally, enable profiling to record function-level statistics for each trace.
-
-```python
-with graphsignal.start_trace(
-        'my-chain', options=graphsignal.TraceOptions(enable_profiling=True)):
-    chain.run("some initial text")
-```
-
-See the [Quick Start](https://graphsignal.com/docs/guides/quick-start/) guide for complete setup instructions.
diff --git a/langchain_md_files/integrations/providers/grobid.mdx b/langchain_md_files/integrations/providers/grobid.mdx
deleted file mode 100644
index 9740854ed117dab4e8a319217485a509427ea553..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/grobid.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
-# Grobid
-
-GROBID is a machine learning library for extracting, parsing, and re-structuring raw documents.
-
-It is designed for parsing academic papers, where it works particularly well.
-
-*Note*: if the articles supplied to Grobid are large documents (e.g. dissertations) exceeding a certain number
-of elements, they might not be processed.
-
-This page covers how to use Grobid to parse articles for LangChain.
-
-## Installation
-The Grobid installation is described in detail at https://grobid.readthedocs.io/en/latest/Install-Grobid/.
-However, it is probably easier and less troublesome to run Grobid through a Docker container,
-as documented [here](https://grobid.readthedocs.io/en/latest/Grobid-docker/).
-
-## Use Grobid with LangChain
-
-Once Grobid is installed and up and running (you can check by visiting http://localhost:8070),
-you're ready to go.
-
-You can now use the GrobidParser to produce documents:
-```python
-from langchain_community.document_loaders.parsers import GrobidParser
-from langchain_community.document_loaders.generic import GenericLoader
-
-# Produce chunks from article paragraphs
-loader = GenericLoader.from_filesystem(
-    "/Users/31treehaus/Desktop/Papers/",
-    glob="*",
-    suffixes=[".pdf"],
-    parser=GrobidParser(segment_sentences=False),
-)
-docs = loader.load()
-
-# Produce chunks from article sentences
-loader = GenericLoader.from_filesystem(
-    "/Users/31treehaus/Desktop/Papers/",
-    glob="*",
-    suffixes=[".pdf"],
-    parser=GrobidParser(segment_sentences=True),
-)
-docs = loader.load()
-```
-Chunk metadata will include bounding boxes. Although these are a bit tricky to parse,
-they are explained at https://grobid.readthedocs.io/en/latest/Coordinates-in-PDF/
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/groq.mdx b/langchain_md_files/integrations/providers/groq.mdx
deleted file mode 100644
index a7bc31364c239ed9747ea486c4e37a0914877a12..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/groq.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Groq
-
->[Groq](https://groq.com) developed the world's first Language Processing Unit™, or `LPU`.
-> The `Groq LPU` has a deterministic, single core streaming architecture that sets the standard 
-> for GenAI inference speed with predictable and repeatable performance for any given workload.
->
->Beyond the architecture, `Groq` software is designed to empower developers like you with 
-> the tools you need to create innovative, powerful AI applications. 
-> 
->With Groq as your engine, you can:
->* Achieve uncompromised low latency and performance for real-time AI and HPC inferences 🔥
->* Know the exact performance and compute time for any given workload 🔮
->* Take advantage of our cutting-edge technology to stay ahead of the competition 💪
-
-
-## Installation and Setup
-
-Install the integration package:
-
-```bash
-pip install langchain-groq
-```
-
-Request an [API key](https://console.groq.com/login?utm_source=langchain&utm_content=provider_page) and set it as an environment variable:
-
-```bash
-export GROQ_API_KEY=gsk_...
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/groq).
-
-```python
-from langchain_groq import ChatGroq
-```
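-
-A minimal sketch (the model name is an assumption; use any model enabled for your Groq account):
-
-```python
-from langchain_groq import ChatGroq
-
-llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0)  # hypothetical model name
-print(llm.invoke("Explain the LPU in one sentence.").content)
-```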
diff --git a/langchain_md_files/integrations/providers/gutenberg.mdx b/langchain_md_files/integrations/providers/gutenberg.mdx
deleted file mode 100644
index 36eb816383d60a8bc6db5d3061bffe278a908f36..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/gutenberg.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Gutenberg
-
->[Project Gutenberg](https://www.gutenberg.org/about/) is an online library of free eBooks.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/gutenberg).
-
-```python
-from langchain_community.document_loaders import GutenbergLoader
-```
diff --git a/langchain_md_files/integrations/providers/hacker_news.mdx b/langchain_md_files/integrations/providers/hacker_news.mdx
deleted file mode 100644
index fc232a3db0c687c817327d910d382748e75984f0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/hacker_news.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# Hacker News
-
->[Hacker News](https://en.wikipedia.org/wiki/Hacker_News) (sometimes abbreviated as `HN`) is a social news 
-> website focusing on computer science and entrepreneurship. It is run by the investment fund and startup 
-> incubator `Y Combinator`. In general, content that can be submitted is defined as "anything that gratifies 
-> one's intellectual curiosity."
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/hacker_news).
-
-```python
-from langchain_community.document_loaders import HNLoader
-```
diff --git a/langchain_md_files/integrations/providers/hazy_research.mdx b/langchain_md_files/integrations/providers/hazy_research.mdx
deleted file mode 100644
index 13cbda6b8ee52f0700a9f260cccac5a310850501..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/hazy_research.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Hazy Research
-
-This page covers how to use the Hazy Research ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Hazy Research wrappers.
-
-## Installation and Setup
-- To use the `manifest`, install it with `pip install manifest-ml`
-
-## Wrappers
-
-### LLM
-
-There exists an LLM wrapper around Hazy Research's `manifest` library. 
-`manifest` is a python library which is itself a wrapper around many model providers, and adds in caching, history, and more.
-
-To use this wrapper:
-```python
-from langchain_community.llms.manifest import ManifestWrapper
-```
diff --git a/langchain_md_files/integrations/providers/helicone.mdx b/langchain_md_files/integrations/providers/helicone.mdx
deleted file mode 100644
index 9f2898870b365ead07427583f13be5d0f68071d5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/helicone.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-# Helicone
-
-This page covers how to use the [Helicone](https://helicone.ai) ecosystem within LangChain.
-
-## What is Helicone?
-
-Helicone is an [open-source](https://github.com/Helicone/helicone) observability platform that proxies your OpenAI traffic and provides you key insights into your spend, latency and usage.
-
-![Screenshot of the Helicone dashboard showing average requests per day, response time, tokens per response, total cost, and a graph of requests over time.](/img/HeliconeDashboard.png "Helicone Dashboard")
-
-## Quick start
-
-With your LangChain environment you can just add the following parameter.
-
-```bash
-export OPENAI_API_BASE="https://oai.hconeai.com/v1"
-```
-
-Now head over to [helicone.ai](https://www.helicone.ai/signup) to create your account, and add your OpenAI API key within our dashboard to view your logs.
-
-![Interface for entering and managing OpenAI API keys in the Helicone dashboard.](/img/HeliconeKeys.png "Helicone API Key Input")
-
-## How to enable Helicone caching
-
-```python
-from langchain_openai import OpenAI
-import openai
-openai.api_base = "https://oai.hconeai.com/v1"
-
-llm = OpenAI(temperature=0.9, headers={"Helicone-Cache-Enabled": "true"})
-text = "What is a helicone?"
-print(llm.invoke(text))
-```
-
-[Helicone caching docs](https://docs.helicone.ai/advanced-usage/caching)
-
-## How to use Helicone custom properties
-
-```python
-from langchain_openai import OpenAI
-import openai
-openai.api_base = "https://oai.hconeai.com/v1"
-
-llm = OpenAI(temperature=0.9, headers={
-        "Helicone-Property-Session": "24",
-        "Helicone-Property-Conversation": "support_issue_2",
-        "Helicone-Property-App": "mobile",
-      })
-text = "What is a helicone?"
-print(llm.invoke(text))
-```
-
-[Helicone property docs](https://docs.helicone.ai/advanced-usage/custom-properties)
diff --git a/langchain_md_files/integrations/providers/hologres.mdx b/langchain_md_files/integrations/providers/hologres.mdx
deleted file mode 100644
index 8dbb3d80faa67bab84f4e70981147535b460ba6f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/hologres.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# Hologres
-
->[Hologres](https://www.alibabacloud.com/help/en/hologres/latest/introduction) is a unified real-time data warehousing service developed by Alibaba Cloud. You can use Hologres to write, update, process, and analyze large amounts of data in real time. 
->`Hologres` supports standard `SQL` syntax, is compatible with `PostgreSQL`, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis for up to petabytes of data, and provides high-concurrency and low-latency online data services. 
-
->`Hologres` provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing).
->`Proxima` is a high-performance software library developed by `Alibaba DAMO Academy`. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open-source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.
-
-## Installation and Setup
-
-Click [here](https://www.alibabacloud.com/zh/product/hologres) to fast deploy a Hologres cloud instance.
-
-```bash
-pip install hologres-vector
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/hologres).
-
-```python
-from langchain_community.vectorstores import Hologres
-```
diff --git a/langchain_md_files/integrations/providers/html2text.mdx b/langchain_md_files/integrations/providers/html2text.mdx
deleted file mode 100644
index c8cf35210fff72525fe00189526e4975f97cdcf6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/html2text.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# HTML to text
-
->[html2text](https://github.com/Alir3z4/html2text/) is a Python package that converts a page of `HTML` into clean, easy-to-read plain `ASCII text`. 
-
-The ASCII also happens to be a valid `Markdown` (a text-to-HTML format).
-
-## Installation and Setup
-
-```bash
-pip install html2text
-```
-
-## Document Transformer
-
-See a [usage example](/docs/integrations/document_transformers/html2text).
-
-```python
-from langchain_community.document_transformers import Html2TextTransformer
-```
diff --git a/langchain_md_files/integrations/providers/huawei.mdx b/langchain_md_files/integrations/providers/huawei.mdx
deleted file mode 100644
index 22b12ca717f7c3e9e8e38670f32982f8794aa6f2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/huawei.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# Huawei
-
->[Huawei Technologies Co., Ltd.](https://www.huawei.com/) is a Chinese multinational 
-> digital communications technology corporation.
-> 
->[Huawei Cloud](https://www.huaweicloud.com/intl/en-us/product/) provides a comprehensive suite of 
-> global cloud computing services. 
- 
-
-## Installation and Setup
-
-To access the `Huawei Cloud`, you need an access token.
-
-You also have to install a python library:
-
-```bash
-pip install -U esdk-obs-python
-```
-
-
-## Document Loader
-
-### Huawei OBS Directory
-
-See a [usage example](/docs/integrations/document_loaders/huawei_obs_directory).
-
-```python
-from langchain_community.document_loaders import OBSDirectoryLoader
-```
-
-### Huawei OBS File
-
-See a [usage example](/docs/integrations/document_loaders/huawei_obs_file).
-
-```python
-from langchain_community.document_loaders.obs_file import OBSFileLoader
-```
diff --git a/langchain_md_files/integrations/providers/huggingface.mdx b/langchain_md_files/integrations/providers/huggingface.mdx
deleted file mode 100644
index da7d39c1c7a52452d4a24ec6f5f57f9ec4aa84d8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/huggingface.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
-# Hugging Face
-
-All functionality related to the [Hugging Face Platform](https://huggingface.co/).
-
-## Installation
-
-Most of the Hugging Face integrations are available in the `langchain-huggingface` package.
-
-```bash
-pip install langchain-huggingface
-```
-
-## Chat models
-
-### ChatHuggingFace
-
-We can use the `Hugging Face` LLM classes or directly use the `ChatHuggingFace` class.
-
-See a [usage example](/docs/integrations/chat/huggingface).
-
-```python
-from langchain_huggingface import ChatHuggingFace
-```
-
-## LLMs
-
-### HuggingFaceEndpoint
-
-
-See a [usage example](/docs/integrations/llms/huggingface_endpoint).
-
-```python
-from langchain_huggingface import HuggingFaceEndpoint
-```
-
-### HuggingFacePipeline
-
-Hugging Face models can be run locally through the `HuggingFacePipeline` class.
-
-See a [usage example](/docs/integrations/llms/huggingface_pipelines).
-
-```python
-from langchain_huggingface import HuggingFacePipeline
-```
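-
-A minimal sketch that runs a small model locally (the model id and generation settings are just examples):
-
-```python
-from langchain_huggingface import HuggingFacePipeline
-
-llm = HuggingFacePipeline.from_model_id(
-    model_id="gpt2",                      # example model, downloaded on first use
-    task="text-generation",
-    pipeline_kwargs={"max_new_tokens": 64},
-)
-print(llm.invoke("Hugging Face is"))
-```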
-
-## Embedding Models
-
-### HuggingFaceEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/huggingfacehub).
-
-```python
-from langchain_huggingface import HuggingFaceEmbeddings
-```
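-
-A minimal sketch (the model name is just an example):
-
-```python
-from langchain_huggingface import HuggingFaceEmbeddings
-
-embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-vector = embeddings.embed_query("Hello, world!")
-print(len(vector))  # dimensionality of the embedding
-```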
-
-### HuggingFaceEndpointEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/huggingfacehub).
-
-```python
-from langchain_huggingface import HuggingFaceEndpointEmbeddings
-```
-
-### HuggingFaceInferenceAPIEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/huggingfacehub).
-
-```python
-from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
-```
-
-### HuggingFaceInstructEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/instruct_embeddings).
-
-```python
-from langchain_community.embeddings import HuggingFaceInstructEmbeddings
-```
-
-### HuggingFaceBgeEmbeddings
-
->[BGE models on the HuggingFace](https://huggingface.co/BAAI/bge-large-en-v1.5) are one of [the best open-source embedding models](https://huggingface.co/spaces/mteb/leaderboard).
->BGE model is created by the [Beijing Academy of Artificial Intelligence (BAAI)](https://en.wikipedia.org/wiki/Beijing_Academy_of_Artificial_Intelligence). `BAAI` is a private non-profit organization engaged in AI research and development.
-
-See a [usage example](/docs/integrations/text_embedding/bge_huggingface).
-
-```python
-from langchain_community.embeddings import HuggingFaceBgeEmbeddings
-```
-
-## Document Loaders
-
-### Hugging Face dataset
-
->[Hugging Face Hub](https://huggingface.co/docs/hub/index) is home to over 75,000
-> [datasets](https://huggingface.co/docs/hub/index#datasets) in more than 100 languages
-> that can be used for a broad range of tasks across NLP, Computer Vision, and Audio.
-> They are used for a diverse range of tasks such as translation, automatic speech
-> recognition, and image classification.
-
-We need to install `datasets` python package.
-
-```bash
-pip install datasets
-```
-
-See a [usage example](/docs/integrations/document_loaders/hugging_face_dataset).
-
-```python
-from langchain_community.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader
-```
-
-### Hugging Face model loader
-
->Load model information from `Hugging Face Hub`, including README content.
->
->This loader interfaces with the `Hugging Face Models API` to fetch 
-> and load model metadata and README files. 
-> The API allows you to search and filter models based on 
-> specific criteria such as model tags, authors, and more.
-
-```python
-from langchain_community.document_loaders import HuggingFaceModelLoader
-```
-
-### Image captions
-
-It uses the Hugging Face models to generate image captions.
-
-We need to install several python packages.
-
-```bash
-pip install transformers pillow
-```
-
-See a [usage example](/docs/integrations/document_loaders/image_captions).
-
-```python
-from langchain_community.document_loaders import ImageCaptionLoader
-```
-
-## Tools
-
-### Hugging Face Hub Tools
-
->[Hugging Face Tools](https://huggingface.co/docs/transformers/v4.29.0/en/custom_tools)
-> support text I/O and are loaded using the `load_huggingface_tool` function.
-
-We need to install several python packages.
-
-```bash
-pip install transformers huggingface_hub
-```
-
-See a [usage example](/docs/integrations/tools/huggingface_tools).
-
-```python
-from langchain_community.agent_toolkits.load_tools import load_huggingface_tool
-```
-
-### Hugging Face Text-to-Speech Model Inference
-
-> It is a wrapper around the `Hugging Face Text-to-Speech Inference API`.
-
-```python
-from langchain_community.tools.audio import HuggingFaceTextToSpeechModelInference
-```
-
diff --git a/langchain_md_files/integrations/providers/hyperbrowser.mdx b/langchain_md_files/integrations/providers/hyperbrowser.mdx
deleted file mode 100644
index 6efbd47a2eb0c8387f96377090be25f7a278d509..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/hyperbrowser.mdx
+++ /dev/null
@@ -1,67 +0,0 @@
-# Hyperbrowser
-
-> [Hyperbrowser](https://hyperbrowser.ai) is a platform for running and scaling headless browsers. It lets you launch and manage browser sessions at scale and provides easy-to-use solutions for any web scraping needs, such as scraping a single page or crawling an entire site.
->
-> Key Features:
->
-> - Instant Scalability - Spin up hundreds of browser sessions in seconds without infrastructure headaches
-> - Simple Integration - Works seamlessly with popular tools like Puppeteer and Playwright
-> - Powerful APIs - Easy to use APIs for scraping/crawling any site, and much more
-> - Bypass Anti-Bot Measures - Built-in stealth mode, ad blocking, automatic CAPTCHA solving, and rotating proxies
-
-For more information about Hyperbrowser, please visit the [Hyperbrowser website](https://hyperbrowser.ai) or if you want to check out the docs, you can visit the [Hyperbrowser docs](https://docs.hyperbrowser.ai).
-
-## Installation and Setup
-
-To get started with `langchain-hyperbrowser`, you can install the package using pip:
-
-```bash
-pip install langchain-hyperbrowser
-```
-
-And you should configure credentials by setting the following environment variables:
-
-`HYPERBROWSER_API_KEY=<your-api-key>`
-
-Make sure to get your API Key from https://app.hyperbrowser.ai/
-
-## Document Loader
-
-The `HyperbrowserLoader` class in `langchain-hyperbrowser` can easily be used to load content from any single page or multiple pages as well as crawl an entire site.
-The content can be loaded as markdown or html.
-
-```python
-from langchain_hyperbrowser import HyperbrowserLoader
-
-loader = HyperbrowserLoader(urls="https://example.com")
-docs = loader.load()
-
-print(docs[0])
-```
-
-## Advanced Usage
-
-You can specify the operation to be performed by the loader. The default operation is `scrape`. For `scrape`, you can provide a single URL or a list of URLs to be scraped. For `crawl`, you can only provide a single URL. The `crawl` operation will crawl the provided page and subpages and return a document for each page.
-
-```python
-loader = HyperbrowserLoader(
-  urls="https://hyperbrowser.ai", api_key="YOUR_API_KEY", operation="crawl"
-)
-```
-
-Optional params for the loader can also be provided in the `params` argument. For more information on the supported params, visit https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait or https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait.
-
-```python
-loader = HyperbrowserLoader(
-  urls="https://example.com",
-  api_key="YOUR_API_KEY",
-  operation="scrape",
-  params={"scrape_options": {"include_tags": ["h1", "h2", "p"]}}
-)
-```
-
-## Additional Resources
-
-- [Hyperbrowser Docs](https://docs.hyperbrowser.ai/)
-- [GitHub](https://github.com/hyperbrowserai/langchain-hyperbrowser/)
-- [PyPi](https://pypi.org/project/langchain-hyperbrowser/)
diff --git a/langchain_md_files/integrations/providers/ibm.mdx b/langchain_md_files/integrations/providers/ibm.mdx
deleted file mode 100644
index c98df29e434e674a45a80ad088edc1f25ae5db98..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ibm.mdx
+++ /dev/null
@@ -1,69 +0,0 @@
-# IBM
-
-The `LangChain` integrations related to [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) platform.
-
-IBM® watsonx.ai™ AI studio is part of the IBM [watsonx](https://www.ibm.com/watsonx)™ AI and data platform, bringing together new generative 
-AI capabilities powered by [foundation models](https://www.ibm.com/products/watsonx-ai/foundation-models) and traditional machine learning (ML) 
-into a powerful studio spanning the AI lifecycle. Tune and guide models with your enterprise data to meet your needs with easy-to-use tools for 
-building and refining performant prompts. With watsonx.ai, you can build AI applications in a fraction of the time and with a fraction of the data. 
-Watsonx.ai offers:
-
-- **Multi-model variety and flexibility:** Choose from IBM-developed, open-source and third-party models, or build your own model.
-- **Differentiated client protection:** IBM stands behind IBM-developed models and indemnifies the client against third-party IP claims.
-- **End-to-end AI governance:** Enterprises can scale and accelerate the impact of AI with trusted data across the business, using data wherever it resides.
-- **Hybrid, multi-cloud deployments:** IBM provides the flexibility to integrate and deploy your AI workloads into your hybrid-cloud stack of choice.
-
-
-## Installation and Setup
-
-Install the integration package with
-```bash
-pip install -qU langchain-ibm
-```
-
-Get an IBM watsonx.ai api key and set it as an environment variable (`WATSONX_APIKEY`)
-```python
-import os
-
-os.environ["WATSONX_APIKEY"] = "your IBM watsonx.ai api key"
-```
-
-## Chat Model
-
-### ChatWatsonx
-
-See a [usage example](/docs/integrations/chat/ibm_watsonx).
-
-```python
-from langchain_ibm import ChatWatsonx
-```
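-
-A minimal sketch (the model id, endpoint URL, and project id are assumptions; use the values from your watsonx.ai instance):
-
-```python
-from langchain_ibm import ChatWatsonx
-
-chat = ChatWatsonx(
-    model_id="ibm/granite-13b-chat-v2",          # hypothetical model id
-    url="https://us-south.ml.cloud.ibm.com",     # hypothetical region endpoint
-    project_id="<your-project-id>",
-)
-print(chat.invoke("Say hello in French.").content)
-```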
-
-## LLMs
-
-### WatsonxLLM
-
-See a [usage example](/docs/integrations/llms/ibm_watsonx).
-
-```python
-from langchain_ibm import WatsonxLLM
-```
-
-## Embedding Models
-
-### WatsonxEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/ibm_watsonx).
-
-```python
-from langchain_ibm import WatsonxEmbeddings
-```
-
-## Reranker
-
-### WatsonxRerank
-
-See a [usage example](/docs/integrations/retrievers/ibm_watsonx_ranker).
-
-```python
-from langchain_ibm import WatsonxRerank
-```
diff --git a/langchain_md_files/integrations/providers/ieit_systems.mdx b/langchain_md_files/integrations/providers/ieit_systems.mdx
deleted file mode 100644
index d81d0be3f74844597a097dcd29c63caccb190dc0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ieit_systems.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# IEIT Systems
-
->[IEIT Systems](https://en.ieisystem.com/) is a Chinese information technology company 
-> established in 1999. It provides the IT infrastructure products, solutions, 
-> and services, innovative IT products and solutions across cloud computing, 
-> big data, and artificial intelligence.
-
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/yuan2).
-
-```python
-from langchain_community.llms.yuan2 import Yuan2
-```
-
-## Chat models
-
-See the [installation instructions](/docs/integrations/chat/yuan2/#setting-up-your-api-server).
-
-Yuan2.0 provides an OpenAI-compatible API, and ChatYuan2 is integrated into LangChain by using the `OpenAI` client.
-Therefore, ensure the `openai` package is installed.
-
-```bash
-pip install openai
-```
-See a [usage example](/docs/integrations/chat/yuan2).
-
-```python
-from langchain_community.chat_models import ChatYuan2
-```
diff --git a/langchain_md_files/integrations/providers/ifixit.mdx b/langchain_md_files/integrations/providers/ifixit.mdx
deleted file mode 100644
index fdcb4ba8023153f0d4ac1cc11705b18a5f8dc126..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ifixit.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# iFixit
-
->[iFixit](https://www.ifixit.com) is the largest, open repair community on the web. The site contains nearly 100k 
-> repair manuals, 200k Questions & Answers on 42k devices, and all the data is licensed under `CC-BY-NC-SA 3.0`.
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/ifixit).
-
-```python
-from langchain_community.document_loaders import IFixitLoader
-```
diff --git a/langchain_md_files/integrations/providers/iflytek.mdx b/langchain_md_files/integrations/providers/iflytek.mdx
deleted file mode 100644
index 9852830511cf0b4438d8c3d3811f4f76dc17c5af..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/iflytek.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# iFlytek
-
->[iFlytek](https://www.iflytek.com) is a Chinese information technology company 
-> established in 1999. It creates voice recognition software and 
-> voice-based internet/mobile products covering education, communication, 
-> music, and intelligent toys industries.
-
-
-## Installation and Setup
-
-- Get `SparkLLM` app_id, api_key and api_secret from [iFlyTek SparkLLM API Console](https://console.xfyun.cn/services/bm3) (for more info, see [iFlyTek SparkLLM Intro](https://xinghuo.xfyun.cn/sparkapi)).
-- Install the Python package (not needed for the embedding models):
-
-```bash
-pip install websocket-client
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/sparkllm).
-
-```python
-from langchain_community.llms import SparkLLM
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/sparkllm).
-
-```python
-from langchain_community.chat_models import ChatSparkLLM
-```
-
-## Embedding models
-
-```python
-from langchain_community.embeddings import SparkLLMTextEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/imsdb.mdx b/langchain_md_files/integrations/providers/imsdb.mdx
deleted file mode 100644
index 8b30a2dea980988f87def38adecfb5390c18bfed..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/imsdb.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# IMSDb
-
->[IMSDb](https://imsdb.com/) is the `Internet Movie Script Database`.
-> 
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/imsdb).
-
-
-```python
-from langchain_community.document_loaders import IMSDbLoader
-```
diff --git a/langchain_md_files/integrations/providers/infinispanvs.mdx b/langchain_md_files/integrations/providers/infinispanvs.mdx
deleted file mode 100644
index b42e7504231bfa0ca334a7982e7cc9760f773b8d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/infinispanvs.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Infinispan VS
-
-> [Infinispan](https://infinispan.org) is an open-source in-memory data grid that provides
-> a key/value data store able to hold all types of data, from Java objects to plain text.
-> Since version 15, Infinispan supports vector search over caches.
-
-## Installation and Setup
-See [Get Started](https://infinispan.org/get-started/) to run an Infinispan server. You may want to disable
-authentication, since it is not supported at the moment.
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/infinispanvs).
-
-```python
-from langchain_community.vectorstores import InfinispanVS
-```
diff --git a/langchain_md_files/integrations/providers/infinity.mdx b/langchain_md_files/integrations/providers/infinity.mdx
deleted file mode 100644
index 887a8584036fefe61274d1bb6874047ae873e63d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/infinity.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
-# Infinity
-
->[Infinity](https://github.com/michaelfeil/infinity) allows the creation of text embeddings.
-
-## Text Embedding Model
-
-There exists an Infinity Embeddings model, which you can access with:
-```python
-from langchain_community.embeddings import InfinityEmbeddings
-```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/infinity)
diff --git a/langchain_md_files/integrations/providers/infino.mdx b/langchain_md_files/integrations/providers/infino.mdx
deleted file mode 100644
index d11c502a3777c97c2a1e26a7cb6f118fff5a949c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/infino.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
-# Infino
-
->[Infino](https://github.com/infinohq/infino) is an open-source observability platform that stores both metrics and application logs together.
-
-Key features of `Infino` include:
-- **Metrics Tracking**: Capture time taken by LLM model to handle request, errors, number of tokens, and costing indication for the particular LLM.
-- **Data Tracking**: Log and store prompt, request, and response data for each LangChain interaction.
-- **Graph Visualization**: Generate basic graphs over time, depicting metrics such as request duration, error occurrences, token count, and cost.
-
-## Installation and Setup
-
-First, you'll need to install the `infinopy` Python package as follows:
-
-```bash
-pip install infinopy
-```
-
-If you already have an `Infino Server` running, then you're good to go; but if
-you don't, follow the next steps to start it:
-
-- Make sure you have Docker installed
-- Run the following in your terminal:
-    ```
-    docker run --rm --detach --name infino-example -p 3000:3000 infinohq/infino:latest
-    ```
-
-
-
-## Using Infino
-
-See a [usage example of `InfinoCallbackHandler`](/docs/integrations/callbacks/infino).
-
-```python
-from langchain.callbacks import InfinoCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/intel.mdx b/langchain_md_files/integrations/providers/intel.mdx
deleted file mode 100644
index 9429d986c070399dc59cae8e010c0649318168f2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/intel.mdx
+++ /dev/null
@@ -1,108 +0,0 @@
-# Intel
-
->[Optimum Intel](https://github.com/huggingface/optimum-intel?tab=readme-ov-file#optimum-intel) is the interface between the 🤗 Transformers and Diffusers libraries and the different tools and libraries provided by Intel to accelerate end-to-end pipelines on Intel architectures.
-
->[Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers?tab=readme-ov-file#intel-extension-for-transformers) (ITREX) is an innovative toolkit designed to accelerate GenAI/LLM everywhere with the optimal performance of Transformer-based models on various Intel platforms, including Intel Gaudi2, Intel CPU, and Intel GPU.
-
-This page covers how to use optimum-intel and ITREX with LangChain.
-
-## Optimum-intel
-
-All functionality related to the [optimum-intel](https://github.com/huggingface/optimum-intel.git) and [IPEX](https://github.com/intel/intel-extension-for-pytorch).
-
-### Installation
-
-Install optimum-intel and IPEX using:
-
-```bash
-pip install optimum[neural-compressor]
-pip install intel_extension_for_pytorch
-```
-
-Please follow the installation instructions as specified below:
-
-* Install optimum-intel as shown [here](https://github.com/huggingface/optimum-intel).
-* Install IPEX as shown [here](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=cpu&version=v2.2.0%2Bcpu).
-
-### Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/optimum_intel).
-We also offer a full tutorial notebook "rag_with_quantized_embeddings.ipynb" for using the embedder in a RAG pipeline in the cookbook dir.
-
-```python
-from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
-```
-
-## Intel® Extension for Transformers (ITREX)
-ITREX is an innovative toolkit to accelerate Transformer-based models on Intel platforms, and it is particularly effective on 4th generation Intel Xeon Scalable processors (codenamed Sapphire Rapids).
-
-Quantization is a process that involves reducing the precision of model weights by representing them using a smaller number of bits. Weight-only quantization specifically focuses on quantizing the weights of the neural network while keeping other components, such as activations, in their original precision.
-
-As large language models (LLMs) become more prevalent, there is a growing need for new and improved quantization methods that can meet the computational demands of these modern architectures while maintaining accuracy. Compared to [normal quantization](https://github.com/intel/intel-extension-for-transformers/blob/main/docs/quantization.md) like W8A8, weight-only quantization is probably a better trade-off between performance and accuracy, since, as we will see below, the bottleneck of deploying LLMs is memory bandwidth, and weight-only quantization normally leads to better accuracy.
-
-Here, we will introduce Embedding Models and Weight-only quantization for Transformers large language models with ITREX. Weight-only quantization is a technique used in deep learning to reduce the memory and computational requirements of neural networks. In the context of deep neural networks, the model parameters, also known as weights, are typically represented using floating-point numbers, which can consume a significant amount of memory and require intensive computational resources.
-
-All functionality related to the [intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers).
-
-### Installation
-
-Install intel-extension-for-transformers. For system requirements and other installation tips, please refer to [Installation Guide](https://github.com/intel/intel-extension-for-transformers/blob/main/docs/installation.md)
-
-```bash
-pip install intel-extension-for-transformers
-```
-Install other required packages.
-
-```bash
-pip install -U torch onnx accelerate datasets
-```
-
-### Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/itrex).
-
-```python
-from langchain_community.embeddings import QuantizedBgeEmbeddings
-```
-
-### Weight-Only Quantization with ITREX
-
-See a [usage example](/docs/integrations/llms/weight_only_quantization).
-
-## Detail of Configuration Parameters
-
-Here are the details of the `WeightOnlyQuantConfig` class.
-
-#### weight_dtype (string): Weight Data Type, Default is "nf4".
-The weights can be quantized to the following data types for storage (`weight_dtype` in `WeightOnlyQuantConfig`):
-* **int8**: Uses 8-bit data type.
-* **int4_fullrange**: Uses the -8 value of the int4 range, compared with the normal int4 range [-7,7].
-* **int4_clip**: Clips and retains the values within the int4 range, setting others to zero.
-* **nf4**: Uses the normalized float 4-bit data type.
-* **fp4_e2m1**: Uses the regular float 4-bit data type. "e2" means that 2 bits are used for the exponent, and "m1" means that 1 bit is used for the mantissa.
-
-#### compute_dtype (string): Computing Data Type, Default is "fp32".
-While these techniques store weights in 4 or 8 bits, the computation still happens in float32, bfloat16 or int8 (`compute_dtype` in `WeightOnlyQuantConfig`):
-* **fp32**: Uses the float32 data type to compute.
-* **bf16**: Uses the bfloat16 data type to compute.
-* **int8**: Uses 8-bit data type to compute.
-
-#### llm_int8_skip_modules (list of module names): Modules to Skip Quantization, Default is None.
-A list of modules for which quantization is skipped.
-
-#### scale_dtype (string): The Scale Data Type, Default is "fp32".
-Currently only "fp32" (float32) is supported.
-
-#### mse_range (boolean): Whether to Search for the Best Clip Range in the Range [0.805, 1.0, 0.005], Default is False.
-#### use_double_quant (boolean): Whether to Quantize the Scale, Default is False.
-Not supported yet.
-#### double_quant_dtype (string): Reserved for Double Quantization.
-#### double_quant_scale_dtype (string): Reserved for Double Quantization.
-#### group_size (int): Group Size for Quantization.
-#### scheme (string): The Format the Weights Are Quantized To. Default is "sym".
-* **sym**: Symmetric.
-* **asym**: Asymmetric.
-#### algorithm (string): The Algorithm Used to Improve Accuracy. Default is "RTN".
-* **RTN**: Round-to-nearest (RTN) is a quantization method that is very intuitive.
-* **AWQ**: Protecting only 1% of the salient weights can greatly reduce quantization error. The salient weight channels are selected by observing the distribution of activations and weights per channel. The salient weights are also multiplied by a large scale factor before quantization in order to preserve them.
-* **TEQ**: A trainable equivalent transformation that preserves the FP32 precision in weight-only quantization.
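-
-As a rough illustration of how these parameters fit together, the sketch below builds a config and passes it to the weight-only quantization pipeline; the class and argument names follow the linked usage example and should be treated as assumptions:
-
-```python
-from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
-from langchain_community.llms.weight_only_quantization import WeightOnlyQuantPipeline
-
-# 4-bit NF4 weights with float32 compute (the defaults described above).
-conf = WeightOnlyQuantConfig(weight_dtype="nf4", compute_dtype="fp32")
-
-llm = WeightOnlyQuantPipeline.from_model_id(
-    model_id="google/flan-t5-large",        # example model id
-    task="text2text-generation",
-    quantization_config=conf,
-    pipeline_kwargs={"max_new_tokens": 64},
-)
-print(llm.invoke("What is weight-only quantization?"))
-```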
diff --git a/langchain_md_files/integrations/providers/iugu.mdx b/langchain_md_files/integrations/providers/iugu.mdx
deleted file mode 100644
index 5abbeaa8a0669d11ba52ded55217dd92a44a6f6c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/iugu.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Iugu
-
->[Iugu](https://www.iugu.com/) is a Brazilian services and software as a service (SaaS)
-> company. It offers payment-processing software and application programming 
-> interfaces for e-commerce websites and mobile applications.
- 
-
-## Installation and Setup
-
-The `Iugu API` requires an access token, which can be found inside of the `Iugu` dashboard.
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/iugu).
-
-```python
-from langchain_community.document_loaders import IuguLoader
-```
diff --git a/langchain_md_files/integrations/providers/jaguar.mdx b/langchain_md_files/integrations/providers/jaguar.mdx
deleted file mode 100644
index 839a34ad3269b658f64fb582a602482230fe0b8a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/jaguar.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
-# Jaguar
-
-This page describes how to use Jaguar vector database within LangChain.
-It contains three sections: introduction, installation and setup, and Jaguar API.
-
-
-## Introduction
-
-Jaguar vector database has the following characteristics:
-
-1. It is a distributed vector database
-2. The “ZeroMove” feature of JaguarDB enables instant horizontal scalability
-3. Multimodal: embeddings, text, images, videos, PDFs, audio, time series, and geospatial
-4. All-masters: allows both parallel reads and writes
-5. Anomaly detection capabilities
-6. RAG support: combines LLM with proprietary and real-time data
-7. Shared metadata: sharing of metadata across multiple vector indexes
-8. Distance metrics: Euclidean, Cosine, InnerProduct, Manhattan, Chebyshev, Hamming, Jaccard, Minkowski
-
-[Overview of Jaguar scalable vector database](http://www.jaguardb.com)
-
-You can run JaguarDB in a Docker container, or download the software and run it on-cloud or off-cloud.
-
-## Installation and Setup
-
-- Install the JaguarDB on one host or multiple hosts
-- Install the Jaguar HTTP Gateway server on one host
-- Install the JaguarDB HTTP Client package
-
-The steps are described in [Jaguar Documents](http://www.jaguardb.com/support.html)
-
-Environment Variables in client programs:
-
-    export OPENAI_API_KEY="......"
-    export JAGUAR_API_KEY="......"
-
-  
-## Jaguar API
-
-Together with LangChain, a Jaguar client class is provided; import it in Python as follows:
-
-```python
-from langchain_community.vectorstores.jaguar import Jaguar
-```
-
-Supported API functions of the Jaguar class are:
-
-- `add_texts`
-- `add_documents`
-- `from_texts`
-- `from_documents`
-- `similarity_search`
-- `is_anomalous`
-- `create`
-- `delete`
-- `clear`
-- `drop`
-- `login`
-- `logout`
-
-
-For more details of the Jaguar API, please refer to [this notebook](/docs/integrations/vectorstores/jaguar)
diff --git a/langchain_md_files/integrations/providers/javelin_ai_gateway.mdx b/langchain_md_files/integrations/providers/javelin_ai_gateway.mdx
deleted file mode 100644
index d678e34597eabac0b1fe0e6cd5010f38a97700ec..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/javelin_ai_gateway.mdx
+++ /dev/null
@@ -1,92 +0,0 @@
-# Javelin AI Gateway
-
-[The Javelin AI Gateway](https://www.getjavelin.io) service is a high-performance, enterprise-grade API Gateway for AI applications.  
-It is designed to streamline the usage and access of various large language model (LLM) providers, 
-such as OpenAI, Cohere, Anthropic and custom large language models within an organization by incorporating
-robust access security for all interactions with LLMs. 
-
-Javelin offers a high-level interface that simplifies the interaction with LLMs by providing a unified endpoint 
-to handle specific LLM related requests. 
-
-See the Javelin AI Gateway [documentation](https://docs.getjavelin.io) for more details.  
-The [Javelin Python SDK](https://www.github.com/getjavelin/javelin-python) is an easy-to-use client library meant to be embedded into AI applications.
-
-## Installation and Setup
-
-Install `javelin_sdk` to interact with Javelin AI Gateway:
-
-```sh
-pip install 'javelin_sdk'
-```
-
-Set the Javelin API key as an environment variable:
-
-```sh
-export JAVELIN_API_KEY=...
-```
-
-## Completions Example
-
-```python
-
-from langchain.chains import LLMChain
-from langchain_community.llms import JavelinAIGateway
-from langchain_core.prompts import PromptTemplate
-
-route_completions = "eng_dept03"
-
-gateway = JavelinAIGateway(
-    gateway_uri="http://localhost:8000",
-    route=route_completions,
-    model_name="text-davinci-003",
-)
-
-# `prompt` can be any PromptTemplate; a simple single-variable template is assumed here
-prompt = PromptTemplate.from_template("Suggest a name for a company that makes a {product}.")
-llmchain = LLMChain(llm=gateway, prompt=prompt)
-result = llmchain.run("podcast player")
-
-print(result)
-
-```
-
-## Embeddings Example
-
-```python
-from langchain_community.embeddings import JavelinAIGatewayEmbeddings
-from langchain_openai import OpenAIEmbeddings
-
-embeddings = JavelinAIGatewayEmbeddings(
-    gateway_uri="http://localhost:8000",
-    route="embeddings",
-)
-
-print(embeddings.embed_query("hello"))
-print(embeddings.embed_documents(["hello"]))
-```
-
-## Chat Example
-```python
-from langchain_community.chat_models import ChatJavelinAIGateway
-from langchain_core.messages import HumanMessage, SystemMessage
-
-messages = [
-    SystemMessage(
-        content="You are a helpful assistant that translates English to French."
-    ),
-    HumanMessage(
-        content="Artificial Intelligence has the power to transform humanity and make the world a better place"
-    ),
-]
-
-chat = ChatJavelinAIGateway(
-    gateway_uri="http://localhost:8000",
-    route="mychatbot_route",
-    model_name="gpt-3.5-turbo",
-    params={
-        "temperature": 0.1
-    }
-)
-
-print(chat(messages))
-
-```
-
diff --git a/langchain_md_files/integrations/providers/jenkins.mdx b/langchain_md_files/integrations/providers/jenkins.mdx
deleted file mode 100644
index 45e217d2422404f5050745a8c660c9ddc9f353a5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/jenkins.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Jenkins
-
-[Jenkins](https://www.jenkins.io/) is an open-source automation platform that enables
-software teams to streamline their development workflows. It's widely adopted in the
-DevOps community as a tool for automating the building, testing, and deployment of
-applications through CI/CD pipelines.
-
-
-## Installation and Setup
-
-```bash
-pip install langchain-jenkins
-```
-
-## Tools
-
-See detail on available tools [here](/docs/integrations/tools/jenkins).
diff --git a/langchain_md_files/integrations/providers/jina.mdx b/langchain_md_files/integrations/providers/jina.mdx
deleted file mode 100644
index 66fa2464b1f0d4de2429ec3ba33126a0d48402bb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/jina.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-# Jina AI
-
->[Jina AI](https://jina.ai/about-us) is a search AI company. `Jina` helps businesses and developers unlock multimodal data with a better search.
-
-:::caution
-For proper compatibility, please ensure you are using the `openai` SDK at version **0.x**.
-:::
-
-## Installation and Setup
-- Get a Jina AI API token from [here](https://jina.ai/embeddings/) and set it as an environment variable (`JINA_API_TOKEN`)
-
-## Chat Models
-
-```python
-from langchain_community.chat_models import JinaChat
-```
-
-See a [usage example](/docs/integrations/chat/jinachat).
-
-## Embedding Models
-
-You can check the list of available models from [here](https://jina.ai/embeddings/)
-
-```python
-from langchain_community.embeddings import JinaEmbeddings
-```
-
-See a [usage example](/docs/integrations/text_embedding/jina).
-
-## Document Transformers
-
-### Jina Rerank
-
-```python
-from langchain_community.document_compressors import JinaRerank
-```
-
-See a [usage example](/docs/integrations/document_transformers/jina_rerank).
-
diff --git a/langchain_md_files/integrations/providers/johnsnowlabs.mdx b/langchain_md_files/integrations/providers/johnsnowlabs.mdx
deleted file mode 100644
index 39f3ea494cbf5d2e6486b4ee66ad9e25d78498ca..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/johnsnowlabs.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
-# Johnsnowlabs
-
-Gain access to the [johnsnowlabs](https://www.johnsnowlabs.com/) ecosystem of enterprise NLP libraries
-with over 21,000 enterprise NLP models in over 200 languages with the open source `johnsnowlabs` library.
-For all 24,000+ models, see the [John Snow Labs Models Hub](https://nlp.johnsnowlabs.com/models).
-
-## Installation and Setup
-
-
-```bash
-pip install johnsnowlabs
-```
-
-To [install enterprise features](https://nlp.johnsnowlabs.com/docs/en/jsl/install_licensed_quick), run:
-```python
-# for more details see https://nlp.johnsnowlabs.com/docs/en/jsl/install_licensed_quick
-from johnsnowlabs import nlp
-
-nlp.install()
-```
-
-
-You can embed your queries and documents with optimized binaries for `cpu`, `gpu`, `apple_silicon`, or `aarch`.
-By default, cpu binaries are used.
-Once a session is started, you must restart your notebook to switch between GPU and CPU, or the change will not take effect.
-
-## Embed Query with CPU:
-```python
-from langchain_community.embeddings import JohnSnowLabsEmbeddings
-
-document = "foo bar"
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert')
-output = embedding.embed_query(document)
-```
-
-
-## Embed Query with GPU:
-
-
-```python
-document = "foo bar"
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','gpu')
-output = embedding.embed_query(document)
-```
-
-
-
-
-## Embed Query with Apple Silicon (M1,M2,etc..):
-
-```python
-document = "foo bar"
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','apple_silicon')
-output = embedding.embed_query(document)
-```
-
-
-
-## Embed Query with AARCH:
-
-```python
-document = "foo bar"
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','aarch')
-output = embedding.embed_query(document)
-```
-
-
-
-
-
-
-## Embed Document with CPU:
-```python
-documents = ["foo bar", 'bar foo']
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert')  # cpu binaries are the default
-output = embedding.embed_documents(documents)
-```
-
-
-
-## Embed Document with GPU:
-
-```python
-documents = ["foo bar", 'bar foo']
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','gpu')
-output = embedding.embed_documents(documents)
-```
-
-
-
-
-
-## Embed Document with Apple Silicon (M1,M2,etc..):
-
-```python
-documents = ["foo bar", 'bar foo']
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','apple_silicon')
-output = embedding.embed_documents(documents)
-```
-
-
-
-## Embed Document with AARCH:
-
-```python
-documents = ["foo bar", 'bar foo']
-embedding = JohnSnowLabsEmbeddings('embed_sentence.bert','aarch')
-output = embedding.embed_documents(documents)
-```
-
-
-
-
-Models are loaded with [nlp.load](https://nlp.johnsnowlabs.com/docs/en/jsl/load_api) and the Spark session is started with [nlp.start()](https://nlp.johnsnowlabs.com/docs/en/jsl/start-a-sparksession) under the hood.
-
-
-
diff --git a/langchain_md_files/integrations/providers/joplin.mdx b/langchain_md_files/integrations/providers/joplin.mdx
deleted file mode 100644
index b3c83acc5ff57b77a0bd587d40d34cd7e51e8258..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/joplin.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Joplin
-
->[Joplin](https://joplinapp.org/) is an open-source note-taking app. It captures your thoughts 
-> and securely accesses them from any device.
- 
-
-## Installation and Setup
-
-The `Joplin API` requires an access token. 
-You can find installation instructions [here](https://joplinapp.org/api/references/rest_api/).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/joplin).
-
-```python
-from langchain_community.document_loaders import JoplinLoader
-```
diff --git a/langchain_md_files/integrations/providers/kdbai.mdx b/langchain_md_files/integrations/providers/kdbai.mdx
deleted file mode 100644
index a5f06d0128748f8c51e8a7976f53327ddd67b2b2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/kdbai.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# KDB.AI
-
->[KDB.AI](https://kdb.ai) is a powerful knowledge-based vector database and search engine that allows you to build scalable, reliable AI applications, using real-time data, by providing advanced search, recommendation and personalization.
-
-
-## Installation and Setup
-
-Install the Python SDK:
-
-```bash
-pip install kdbai-client
-```
-
-
-## Vector store
-
-There exists a wrapper around KDB.AI indexes, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-```python
-from langchain_community.vectorstores import KDBAI
-```
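-
-A minimal sketch of the wrapper in use (the `table` handle and the embedding model below are assumptions; the table itself is created with the KDB.AI client beforehand):
-
-```python
-from langchain_openai import OpenAIEmbeddings
-from langchain_community.vectorstores import KDBAI
-
-# `table` is assumed to be a kdbai_client table handle created separately
-vectordb = KDBAI(table, OpenAIEmbeddings())
-vectordb.add_texts(["KDB.AI is a knowledge-based vector database."])
-docs = vectordb.similarity_search("What is KDB.AI?", k=1)
-```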
-
-For a more detailed walkthrough of the KDB.AI vectorstore, see [this notebook](/docs/integrations/vectorstores/kdbai)
diff --git a/langchain_md_files/integrations/providers/kinetica.mdx b/langchain_md_files/integrations/providers/kinetica.mdx
deleted file mode 100644
index df14e1ae8bc24076dc56026995ad464b728d1ad4..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/kinetica.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
-# Kinetica
-
-[Kinetica](https://www.kinetica.com/) is a real-time database purpose built for enabling
-analytics and generative AI on time-series & spatial data.
-
-## Chat Model
-
-The Kinetica LLM wrapper uses the [Kinetica SqlAssist
-LLM](https://docs.kinetica.com/7.2/sql-gpt/concepts/) to transform natural language into
-SQL to simplify the process of data retrieval.
-
-See [Kinetica Language To SQL Chat Model](/docs/integrations/chat/kinetica) for usage.
-
-```python
-from langchain_community.chat_models.kinetica import ChatKinetica
-```
-
-## Vector Store
-
-The Kinetica vectorstore wrapper leverages Kinetica's native support for [vector
-similarity search](https://docs.kinetica.com/7.2/vector_search/).
-
-See [Kinetica Vectorstore API](/docs/integrations/vectorstores/kinetica) for usage.
-
-```python
-from langchain_community.vectorstores import Kinetica
-```
-
-## Document Loader
-
-The Kinetica Document loader can be used to load LangChain [Documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) from the
-[Kinetica](https://www.kinetica.com/) database.
-
-See [Kinetica Document Loader](/docs/integrations/document_loaders/kinetica) for usage
-
-```python
-from langchain_community.document_loaders.kinetica_loader import KineticaLoader
-```
-
-## Retriever
-
-The Kinetica Retriever can return documents given an unstructured query.
-
-See [Kinetica VectorStore based Retriever](/docs/integrations/retrievers/kinetica) for usage
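-
-A minimal sketch, assuming `vectorstore` is a Kinetica vector store created as in the section above:
-
-```python
-# `vectorstore` is assumed to be a Kinetica vector store built as shown earlier
-retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
-docs = retriever.invoke("What is Kinetica?")
-```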
diff --git a/langchain_md_files/integrations/providers/koboldai.mdx b/langchain_md_files/integrations/providers/koboldai.mdx
deleted file mode 100644
index 66db8d9df427b97617da172435419c96e1b63894..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/koboldai.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# KoboldAI
-
->[KoboldAI](https://koboldai.com/) is a free, open-source project that allows users to run AI models locally 
-> on their own computer. 
-> It's a browser-based front-end that can be used for writing or role playing with an AI.
->[KoboldAI](https://github.com/KoboldAI/KoboldAI-Client) is "a browser-based front-end for 
-> AI-assisted writing with multiple local & remote AI models...". 
-> It has a public and local API that can be used in LangChain.
-
-## Installation and Setup
-
-Check out the [installation guide](https://github.com/KoboldAI/KoboldAI-Client).
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/koboldai).
-
-```python
-from langchain_community.llms import KoboldApiLLM
-```
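-
-A minimal sketch, assuming a KoboldAI instance with its API enabled is reachable at the endpoint below (the endpoint and `max_length` values are placeholders):
-
-```python
-from langchain_community.llms import KoboldApiLLM
-
-# endpoint of a locally running KoboldAI instance (placeholder)
-llm = KoboldApiLLM(endpoint="http://localhost:5000", max_length=80)
-print(llm.invoke("### Instruction:\nWhat is the first book of the Bible?\n### Response:"))
-```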
diff --git a/langchain_md_files/integrations/providers/konko.mdx b/langchain_md_files/integrations/providers/konko.mdx
deleted file mode 100644
index c7146778c9665001c24985e92dded13bc592e126..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/konko.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
-# Konko
-All functionality related to Konko
-
->[Konko AI](https://www.konko.ai/) provides a fully managed API to help application developers
-
->1. **Select** the right open source or proprietary LLMs for their application
->2. **Build** applications faster with integrations to leading application frameworks and fully managed APIs
->3. **Fine tune** smaller open-source LLMs to achieve industry-leading performance at a fraction of the cost
->4. **Deploy production-scale APIs** that meet security, privacy, throughput, and latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure
-
-## Installation and Setup
-
-1. Sign in to our web app to [create an API key](https://platform.konko.ai/settings/api-keys) to access models via our endpoints for [chat completions](https://docs.konko.ai/reference/post-chat-completions) and [completions](https://docs.konko.ai/reference/post-completions).
-2. Enable a Python 3.8+ environment
-3. Install the SDK
-
-```bash
-pip install konko
-```
-
-4. Set API keys as environment variables (`KONKO_API_KEY`, `OPENAI_API_KEY`)
-
-```bash
-export KONKO_API_KEY={your_KONKO_API_KEY_here}
-export OPENAI_API_KEY={your_OPENAI_API_KEY_here} #Optional
-```
-
-Please see [the Konko docs](https://docs.konko.ai/docs/getting-started) for more details.
-
-
-## LLM
-
-**Explore Available Models:** Start by browsing through the [available models](https://docs.konko.ai/docs/list-of-models) on Konko. Each model caters to different use cases and capabilities.
-
-Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/get-models).
-
-See a usage [example](/docs/integrations/llms/konko).
-
-### Examples of Endpoint Usage
-
-- **Completion with mistralai/Mistral-7B-v0.1:**
-
-  ```python
-  from langchain_community.llms import Konko
-  llm = Konko(max_tokens=800, model='mistralai/Mistral-7B-v0.1')
-  prompt = "Generate a Product Description for Apple Iphone 15"
-  response = llm.invoke(prompt)
-  ```
-
-## Chat Models
-
-See a usage [example](/docs/integrations/chat/konko).
-
-
-- **ChatCompletion with Mistral-7B:**
-
-  ```python
-  from langchain_core.messages import HumanMessage
-  from langchain_community.chat_models import ChatKonko
-  chat_instance = ChatKonko(max_tokens=10, model = 'mistralai/mistral-7b-instruct-v0.1')
-  msg = HumanMessage(content="Hi")
-  chat_response = chat_instance([msg])
-  ```
-
-For further assistance, contact [support@konko.ai](mailto:support@konko.ai) or join our [Discord](https://discord.gg/TXV2s3z7RZ).
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/konlpy.mdx b/langchain_md_files/integrations/providers/konlpy.mdx
deleted file mode 100644
index d4d925144e34cfc15f766e7e1e6f9fb35c7792c1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/konlpy.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# KoNLPY
-
->[KoNLPy](https://konlpy.org/) is a Python package for natural language processing (NLP) 
-> of the Korean language.
-
-
-## Installation and Setup
-
-You need to install the `konlpy` python package.
-
-```bash
-pip install konlpy
-```
-
-## Text splitter
-
-See a [usage example](/docs/how_to/split_by_token/#konlpy).
-
-```python
-from langchain_text_splitters import KonlpyTextSplitter
-```
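-
-A minimal sketch of splitting Korean text (the sample sentence is just an illustration):
-
-```python
-from langchain_text_splitters import KonlpyTextSplitter
-
-text_splitter = KonlpyTextSplitter(chunk_size=100, chunk_overlap=0)
-texts = text_splitter.split_text("안녕하세요. 이것은 한국어 문장 분리 예시입니다.")
-print(texts)
-```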
diff --git a/langchain_md_files/integrations/providers/kuzu.mdx b/langchain_md_files/integrations/providers/kuzu.mdx
deleted file mode 100644
index aff0a0f6689336b04ac5e1532b9cfb722729e93d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/kuzu.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
-# Kùzu
-
-> [Kùzu](https://kuzudb.com/) is an embeddable, scalable, extremely fast graph database.
-> It is permissively licensed with an MIT license, and you can see its source code [here](https://github.com/kuzudb/kuzu).
-
-> Key characteristics of Kùzu:
->- Performance and scalability: Implements modern, state-of-the-art join algorithms for graphs.
->- Usability: Very easy to set up and get started with, as there are no servers (embedded architecture).
->- Interoperability: Can conveniently scan and copy data from external columnar formats, CSV, JSON and relational databases.
->- Structured property graph model: Implements the property graph model, with added structure.
->- Cypher support: Allows convenient querying of the graph in Cypher, a declarative query language.
-
-> Get started with Kùzu by visiting their [documentation](https://docs.kuzudb.com/).
-
-
-## Installation and Setup
-
-Install the Python SDK as follows:
-
-```bash
-pip install -U langchain-kuzu
-```
-
-## Usage
-
-## Graphs
-
-See a [usage example](/docs/integrations/graphs/kuzu_db).
-
-```python
-from langchain_kuzu.graphs.kuzu_graph import KuzuGraph
-```
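-
-A minimal sketch, assuming an embedded Kùzu database stored in a local directory; newer versions of `langchain-kuzu` may require additional constructor arguments (an assumption worth checking against the usage example linked above):
-
-```python
-import kuzu
-from langchain_kuzu.graphs.kuzu_graph import KuzuGraph
-
-db = kuzu.Database("test_db")  # embedded database stored in a local directory
-graph = KuzuGraph(db)          # constructor arguments may differ by version (assumption)
-```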
-
-## Chains
-
-See a [usage example](/docs/integrations/graphs/kuzu_db/#creating-kuzuqachain).
-
-```python
-from langchain_kuzu.chains.graph_qa.kuzu import KuzuQAChain
-```
diff --git a/langchain_md_files/integrations/providers/labelstudio.mdx b/langchain_md_files/integrations/providers/labelstudio.mdx
deleted file mode 100644
index a634f086fca7e233c518ca989ee2313bbc37620a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/labelstudio.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# Label Studio
-
-
->[Label Studio](https://labelstud.io/guide/get_started) is an open-source data labeling platform that provides LangChain with flexibility when it comes to labeling data for fine-tuning large language models (LLMs). It also enables the preparation of custom training data and the collection and evaluation of responses through human feedback.
-
-## Installation and Setup
-
-See the [Label Studio installation guide](https://labelstud.io/guide/install) for installation options.
-
-We need to install the `label-studio` and `label-studio-sdk` Python packages:
-
-```bash
-pip install label-studio label-studio-sdk
-```
-
-
-## Callbacks
-
-See a [usage example](/docs/integrations/callbacks/labelstudio).
-
-```python
-from langchain.callbacks import LabelStudioCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/lakefs.mdx b/langchain_md_files/integrations/providers/lakefs.mdx
deleted file mode 100644
index c38d5bb492827bd823c87fa9fffae68fd393af28..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/lakefs.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# lakeFS
-
->[lakeFS](https://docs.lakefs.io/) provides scalable version control over 
-> the data lake, and uses Git-like semantics to create and access those versions. 
-
-## Installation and Setup
-
-Get the `ENDPOINT`, `LAKEFS_ACCESS_KEY`, and `LAKEFS_SECRET_KEY`.
-You can find installation instructions [here](https://docs.lakefs.io/quickstart/launch.html).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/lakefs).
-
-```python
-from langchain_community.document_loaders import LakeFSLoader
-```
diff --git a/langchain_md_files/integrations/providers/lancedb.mdx b/langchain_md_files/integrations/providers/lancedb.mdx
deleted file mode 100644
index 44440de047ac4ce566a0bca5f86b88b69c157a67..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/lancedb.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# LanceDB
-
-This page covers how to use [LanceDB](https://github.com/lancedb/lancedb) within LangChain.
-It is broken into two parts: installation and setup, and then references to specific LanceDB wrappers.
-
-## Installation and Setup
-
-- Install the Python SDK with `pip install lancedb`
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around LanceDB databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-
-```python
-from langchain_community.vectorstores import LanceDB
-```
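-
-A minimal sketch of the wrapper in action; the embedding model here is an assumption, and any LangChain embeddings class can be used:
-
-```python
-from langchain_community.vectorstores import LanceDB
-from langchain_openai import OpenAIEmbeddings
-
-vectorstore = LanceDB.from_texts(
-    ["LanceDB is an embedded vector database", "LangChain provides a wrapper for it"],
-    OpenAIEmbeddings(),
-)
-print(vectorstore.similarity_search("embedded vector database", k=1))
-```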
-
-For a more detailed walkthrough of the LanceDB wrapper, see [this notebook](/docs/integrations/vectorstores/lancedb)
diff --git a/langchain_md_files/integrations/providers/langchain_decorators.mdx b/langchain_md_files/integrations/providers/langchain_decorators.mdx
deleted file mode 100644
index d719f90b2988f70af7bb290d485b34046d0df134..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/langchain_decorators.mdx
+++ /dev/null
@@ -1,370 +0,0 @@
-# LangChain Decorators ✨
-
-~~~
-Disclaimer: `LangChain decorators` is not created by the LangChain team and is not supported by it.
-~~~
-
->`LangChain decorators` is a layer on top of LangChain that provides syntactic sugar 🍭 for writing custom langchain prompts and chains
->
->For Feedback, Issues, Contributions - please raise an issue here: 
->[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators)
-
-
-Main principles and benefits:
-
-- more `pythonic` way of writing code
-- write multiline prompts that won't break your code flow with indentation
-- making use of IDE in-built support for **hinting**, **type checking** and **popup with docs** to quickly peek in the function to see the prompt, parameters it consumes etc.
-- leverage all the power of 🦜🔗 LangChain ecosystem
-- adding support for **optional parameters**
-- easily share parameters between the prompts by binding them to one class
-
-
-Here is a simple example of a code written with **LangChain Decorators ✨**
-
-``` python
-
-@llm_prompt
-def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers")->str:
-    """
-    Write me a short header for my post about {topic} for {platform} platform. 
-    It should be for {audience} audience.
-    (Max 15 words)
-    """
-    return
-
-# run it naturally
-write_me_short_post(topic="starwars")
-# or
-write_me_short_post(topic="starwars", platform="reddit")
-```
-
-# Quick start
-## Installation
-```bash
-pip install langchain_decorators
-```
-
-## Examples
-
-A good way to start is to review the examples here:
- - [jupyter notebook](https://github.com/ju-bezdek/langchain-decorators/blob/main/example_notebook.ipynb)
- - [colab notebook](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)
-
-# Defining other parameters
-Here we are just marking a function as a prompt with the `llm_prompt` decorator, effectively turning it into an LLMChain instead of running it directly.
-
-A standard LLMChain takes many more init parameters than just `input_variables` and a prompt... here this implementation detail is hidden by the decorator.
-Here is how it works:
-
-1. Using **Global settings**:
-
-``` python
-# define global settings for all prompts (if not set - chatGPT is the current default)
-from langchain_decorators import GlobalSettings
-from langchain_openai import ChatOpenAI
-
-GlobalSettings.define_settings(
-    default_llm=ChatOpenAI(temperature=0.0),  # this is the default... can be changed here globally
-    default_streaming_llm=ChatOpenAI(temperature=0.0, streaming=True),  # this is the default... will be used for streaming
-)
-```
-
-2. Using predefined **prompt types**
-
-``` python
-# You can change the default prompt types
-from langchain_decorators import PromptTypes, PromptTypeSettings
-from langchain_openai import ChatOpenAI
-
-PromptTypes.AGENT_REASONING.llm = ChatOpenAI()
-
-# Or you can just define your own ones:
-class MyCustomPromptTypes(PromptTypes):
-    GPT4=PromptTypeSettings(llm=ChatOpenAI(model="gpt-4"))
-
-@llm_prompt(prompt_type=MyCustomPromptTypes.GPT4) 
-def write_a_complicated_code(app_idea:str)->str:
-    ...
-
-```
-
-3.  Define the settings **directly in the decorator**
-
-``` python
-from langchain_openai import OpenAI
-
-@llm_prompt(
-    llm=OpenAI(temperature=0.7),
-    stop_tokens=["\nObservation"],
-    ...
-    )
-def creative_writer(book_title:str)->str:
-    ...
-```
-
-## Passing a memory and/or callbacks:
-
-To pass any of these, just declare them in the function (or use kwargs to pass anything)
-
-```python
-
-@llm_prompt()
-async def write_me_short_post(topic:str, platform:str="twitter", memory:SimpleMemory = None):
-    """
-    {history_key}
-    Write me a short header for my post about {topic} for {platform} platform. 
-    It should be for {audience} audience.
-    (Max 15 words)
-    """
-    pass
-
-await write_me_short_post(topic="old movies")
-
-```
-
-# Simplified streaming
-
-If we want to leverage streaming:
- - we need to define the prompt as an async function 
- - turn on streaming on the decorator, or define a PromptType with streaming on
- - capture the stream using StreamingContext
-
-This way we just mark which prompts should be streamed, without needing to tinker with which LLM to use or to create and pass a streaming handler into a particular part of our chain... just turn streaming on/off on the prompt / prompt type...
-
-The streaming will happen only if we call the prompt in a streaming context ... there we can define a simple function to handle the stream
-
-``` python
-# this code example is complete and should run as it is
-
-from langchain_decorators import StreamingContext, llm_prompt
-
-# this will mark the prompt for streaming (useful if we want to stream just some prompts in our app... but don't want to pass and distribute the callback handlers)
-# note that only async functions can be streamed (will get an error if it's not)
-@llm_prompt(capture_stream=True) 
-async def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
-    """
-    Write me a short header for my post about {topic} for {platform} platform. 
-    It should be for {audience} audience.
-    (Max 15 words)
-    """
-    pass
-
-
-
-# just an arbitrary  function to demonstrate the streaming... will be some websockets code in the real world
-tokens=[]
-def capture_stream_func(new_token:str):
-    tokens.append(new_token)
-
-# if we want to capture the stream, we need to wrap the execution into StreamingContext... 
-# this will allow us to capture the stream even if the prompt call is hidden inside higher level method
-# only the prompts marked with capture_stream will be captured here
-with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
-    result = await write_me_short_post(topic="old movies")
-    print("Stream finished ... we can distinguish tokens thanks to alternating colors")
-
-
-print("\nWe've captured",len(tokens),"tokens🎉\n")
-print("Here is the result:")
-print(result)
-```
-
-
-# Prompt declarations
-By default, the prompt is the whole function docstring, unless you mark out your prompt explicitly (see below).
-
-## Documenting your prompt
-
-We can specify what part of our docs is the prompt definition, by specifying a code block with `<prompt>` language tag
-
-``` python
-@llm_prompt
-def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
-    """
-    Here is a good way to write a prompt as part of a function docstring, with additional documentation for devs.
-
-    It needs to be a code block, marked as a `<prompt>` language
-    ```<prompt>
-    Write me a short header for my post about {topic} for {platform} platform. 
-    It should be for {audience} audience.
-    (Max 15 words)
-    ```
-
-    Now only the code block above will be used as the prompt, and the rest of the docstring will be used as a description for developers.
-    (It also has the nice benefit that an IDE (like VS Code) will display the prompt properly, not trying to parse it as markdown and thus mangling the new lines.)
-    """
-    return 
-```
-
-## Chat messages prompt
-
-For chat models it is very useful to define the prompt as a set of message templates... here is how to do it:
-
-``` python
-@llm_prompt
-def simulate_conversation(human_input:str, agent_role:str="a pirate"):
-    """
-    ## System message
-     - note the `:system` suffix inside the <prompt:_role_> tag
-     
-
-    ```<prompt:system>
-    You are a {agent_role} hacker. You must act like one.
-    You always reply in code, using python or javascript code blocks...
-    for example:
-    
-    ... do not reply with anything else.. just with code - respecting your role.
-    ```
-
-    # human message 
-    (we are using the real roles that are enforced by the LLM - GPT supports system, assistant, user)
-    ``` <prompt:user>
-    Hello, who are you?
-    ```
-    a reply:
-    
-
-    ``` <prompt:assistant>
-    \``` python <<- escaping inner code block with \ that should be part of the prompt
-    def hello():
-        print("Argh... hello you pesky pirate")
-    \```
-    ```
-    
-    we can also add some history using placeholder
-    ```<prompt:placeholder>
-    {history}
-    ```
-    ```<prompt:user>
-    {human_input}
-    ```
-
-    Now only the code blocks above will be used as the prompt, and the rest of the docstring will be used as a description for developers.
-    (It also has the nice benefit that an IDE (like VS Code) will display the prompt properly, not trying to parse it as markdown and thus mangling the new lines.)
-    """
-    pass
-
-```
-
-the roles here are model native roles (assistant, user, system for chatGPT)
-
-
-
-# Optional sections
-- you can define whole sections of your prompt that should be optional
-- if any input in the section is missing, the whole section won't be rendered
-
-the syntax for this is as follows:
-
-``` python
-@llm_prompt
-def prompt_with_optional_partials():
-    """
-    this text will be rendered always, but
-
-    {? anything inside this block will be rendered only if all the {value}s parameters are not empty (None | "")   ?}
-
-    you can also place it in between the words
-    this too will be rendered{? , but
-        this  block will be rendered only if {this_value} and {this_value}
-        is not empty?} !
-    """
-```
-
-
-# Output parsers
-
-- llm_prompt decorator natively tries to detect the best output parser based on the output type. (if not set, it returns the raw string)
-- list, dict and pydantic outputs are also supported natively (automatically)
-
-``` python
-# this code example is complete and should run as it is
-
-from langchain_decorators import llm_prompt
-
-@llm_prompt
-def write_name_suggestions(company_business:str, count:int)->list:
-    """ Write me {count} good name suggestions for company that {company_business}
-    """
-    pass
-
-write_name_suggestions(company_business="sells cookies", count=5)
-```
-
-## More complex structures
-
-For dict / pydantic outputs you need to specify the formatting instructions... 
-this can be tedious, which is why you can let the output parser generate the instructions for you based on the (pydantic) model
-
-``` python
-from langchain_decorators import llm_prompt
-from pydantic import BaseModel, Field
-
-
-class TheOutputStructureWeExpect(BaseModel):
-    name:str = Field (description="The name of the company")
-    headline:str = Field( description="The description of the company (for landing page)")
-    employees:list[str] = Field(description="5-8 fake employee names with their positions")
-
-@llm_prompt()
-def fake_company_generator(company_business:str)->TheOutputStructureWeExpect:
-    """ Generate a fake company that {company_business}
-    {FORMAT_INSTRUCTIONS}
-    """
-    return
-
-company = fake_company_generator(company_business="sells cookies")
-
-# print the result nicely formatted
-print("Company name: ",company.name)
-print("company headline: ",company.headline)
-print("company employees: ",company.employees)
-
-```
-
-
-# Binding the prompt to an object
-
-``` python
-from pydantic import BaseModel
-from langchain_decorators import llm_prompt
-
-class AssistantPersonality(BaseModel):
-    assistant_name:str
-    assistant_role:str
-    field:str
-
-    @property
-    def a_property(self):
-        return "whatever"
-
-    def hello_world(self, function_kwarg:str=None):
-        """
-        We can reference any {field} or {a_property} inside our prompt... and combine it with {function_kwarg} in the method
-        """
-
-    
-    @llm_prompt
-    def introduce_your_self(self)->str:
-        """
-        ``` <prompt:system>
-        You are an assistant named {assistant_name}. 
-        Your role is to act as {assistant_role}
-        ```
-        ```<prompt:user>
-        Introduce your self (in less than 20 words)
-        ```
-        """
-
-    
-
-personality = AssistantPersonality(assistant_name="John", assistant_role="a pirate")
-
-print(personality.introduce_your_self(personality))
-```
-
-
-# More examples:
-
-- these and few more examples are also available in the [colab notebook here](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)
-- including the [ReAct Agent re-implementation](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=3bID5fryE2Yp) using purely langchain decorators
diff --git a/langchain_md_files/integrations/providers/langfair.mdx b/langchain_md_files/integrations/providers/langfair.mdx
deleted file mode 100644
index c140649e985d943b64482e3258320395409203ea..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/langfair.mdx
+++ /dev/null
@@ -1,129 +0,0 @@
-# LangFair: Use-Case Level LLM Bias and Fairness Assessments
-
-LangFair is a comprehensive Python library designed for conducting bias and fairness assessments of large language model (LLM) use cases. The LangFair [repository](https://github.com/cvs-health/langfair) includes a comprehensive framework for [choosing bias and fairness metrics](https://github.com/cvs-health/langfair/tree/main#-choosing-bias-and-fairness-metrics-for-an-llm-use-case), along with [demo notebooks](https://github.com/cvs-health/langfair/tree/main/examples) and a [technical playbook](https://arxiv.org/abs/2407.10853) that discusses LLM bias and fairness risks, evaluation metrics, and best practices. 
-
-Explore our [documentation site](https://cvs-health.github.io/langfair/) for detailed instructions on using LangFair.
-
-## ⚡ Quickstart Guide
-### (Optional) Create a virtual environment for using LangFair
-We recommend creating a new virtual environment using venv before installing LangFair. To do so, please follow instructions [here](https://docs.python.org/3/library/venv.html).
-
-### Installing LangFair
-The latest version can be installed from PyPI:
-
-```bash
-pip install langfair
-```
-
-### Usage Examples
-Below are code samples illustrating how to use LangFair to assess bias and fairness risks in text generation and summarization use cases. The below examples assume the user has already defined a list of prompts from their use case, `prompts`. 
-
-##### Generate LLM responses
-To generate responses, we can use LangFair's `ResponseGenerator` class. First, we must create a `langchain` LLM object. Below we use `ChatVertexAI`, but **any of [LangChain’s LLM classes](https://js.langchain.com/docs/integrations/chat/) may be used instead**. Note that `InMemoryRateLimiter` is used to avoid rate limit errors.
-```python
-from langchain_google_vertexai import ChatVertexAI
-from langchain_core.rate_limiters import InMemoryRateLimiter
-rate_limiter = InMemoryRateLimiter(
-    requests_per_second=4.5, check_every_n_seconds=0.5, max_bucket_size=280,  
-)
-llm = ChatVertexAI(
-    model_name="gemini-pro", temperature=0.3, rate_limiter=rate_limiter
-)
-```
-We can use `ResponseGenerator.generate_responses` to generate 25 responses for each prompt, as is convention for toxicity evaluation.
-```python
-from langfair.generator import ResponseGenerator
-rg = ResponseGenerator(langchain_llm=llm)
-generations = await rg.generate_responses(prompts=prompts, count=25)
-responses = generations["data"]["response"]
-duplicated_prompts = generations["data"]["prompt"] # so prompts correspond to responses
-```
-
-##### Compute toxicity metrics
-Toxicity metrics can be computed with `ToxicityMetrics`. Note that use of `torch.device` is optional and should be used if GPU is available to speed up toxicity computation.
-```python
-# import torch # uncomment if GPU is available
-# device = torch.device("cuda") # uncomment if GPU is available
-from langfair.metrics.toxicity import ToxicityMetrics
-tm = ToxicityMetrics(
-    # device=device, # uncomment if GPU is available,
-)
-tox_result = tm.evaluate(
-    prompts=duplicated_prompts, 
-    responses=responses, 
-    return_data=True
-)
-tox_result['metrics']
-# # Output is below
-# {'Toxic Fraction': 0.0004,
-# 'Expected Maximum Toxicity': 0.013845130120171235,
-# 'Toxicity Probability': 0.01}
-```
-
-##### Compute stereotype metrics
-Stereotype metrics can be computed with `StereotypeMetrics`.
-```python
-from langfair.metrics.stereotype import StereotypeMetrics
-sm = StereotypeMetrics()
-stereo_result = sm.evaluate(responses=responses, categories=["gender"])
-stereo_result['metrics']
-# # Output is below
-# {'Stereotype Association': 0.3172750176745329,
-# 'Cooccurrence Bias': 0.44766333654278373,
-# 'Stereotype Fraction - gender': 0.08}
-```
-
-##### Generate counterfactual responses and compute metrics
-We can generate counterfactual responses with `CounterfactualGenerator`.
-```python
-from langfair.generator.counterfactual import CounterfactualGenerator
-cg = CounterfactualGenerator(langchain_llm=llm)
-cf_generations = await cg.generate_responses(
-    prompts=prompts, attribute='gender', count=25
-)
-male_responses = cf_generations['data']['male_response']
-female_responses = cf_generations['data']['female_response']
-```
-
-Counterfactual metrics can be easily computed with `CounterfactualMetrics`.
-```python
-from langfair.metrics.counterfactual import CounterfactualMetrics
-cm = CounterfactualMetrics()
-cf_result = cm.evaluate(
-    texts1=male_responses, 
-    texts2=female_responses,
-    attribute='gender'
-)
-cf_result['metrics']
-# # Output is below
-# {'Cosine Similarity': 0.8318708,
-# 'RougeL Similarity': 0.5195852482361165,
-# 'Bleu Similarity': 0.3278433712872481,
-# 'Sentiment Bias': 0.0009947145187601957}
-```
-
-##### Alternative approach: Semi-automated evaluation with `AutoEval`
-To streamline assessments for text generation and summarization use cases, the `AutoEval` class conducts a multi-step process that completes all of the aforementioned steps with two lines of code.
-```python
-from langfair.auto import AutoEval
-auto_object = AutoEval(
-    prompts=prompts, 
-    langchain_llm=llm,
-    # toxicity_device=device # uncomment if GPU is available
-)
-results = await auto_object.evaluate()
-results['metrics']
-# # Output is below
-# {'Toxicity': {'Toxic Fraction': 0.0004,
-#   'Expected Maximum Toxicity': 0.013845130120171235,
-#   'Toxicity Probability': 0.01},
-#  'Stereotype': {'Stereotype Association': 0.3172750176745329,
-#   'Cooccurrence Bias': 0.44766333654278373,
-#   'Stereotype Fraction - gender': 0.08,
-#   'Expected Maximum Stereotype - gender': 0.60355167388916,
-#   'Stereotype Probability - gender': 0.27036},
-#  'Counterfactual': {'male-female': {'Cosine Similarity': 0.8318708,
-#    'RougeL Similarity': 0.5195852482361165,
-#    'Bleu Similarity': 0.3278433712872481,
-#    'Sentiment Bias': 0.0009947145187601957}}}
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/lantern.mdx b/langchain_md_files/integrations/providers/lantern.mdx
deleted file mode 100644
index 9b4a537acfaa7b4d6b32843fcd822fb09ddd3759..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/lantern.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Lantern
-
-This page covers how to use [Lantern](https://github.com/lanterndata/lantern) within LangChain.
-It is broken into two parts: setup, and then references to specific Lantern wrappers.
-
-## Setup
-1. The first step is to create a database with the `lantern` extension installed.
-
-    Follow the steps at [Lantern Installation Guide](https://github.com/lanterndata/lantern#-quick-install) to install the database and the extension. The docker image is the easiest way to get started.
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around Postgres vector databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-```python
-from langchain_community.vectorstores import Lantern
-```
-
-### Usage
-
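-A minimal sketch, assuming a local Postgres database with the `lantern` extension installed; the connection string, collection name, and embedding model are all placeholders:
-
-```python
-from langchain_community.vectorstores import Lantern
-from langchain_openai import OpenAIEmbeddings
-
-CONNECTION_STRING = "postgresql+psycopg2://postgres:postgres@localhost:5432/postgres"  # placeholder
-
-vectorstore = Lantern.from_texts(
-    ["Lantern adds vector search to Postgres"],
-    OpenAIEmbeddings(),
-    collection_name="demo_docs",
-    connection_string=CONNECTION_STRING,
-)
-docs = vectorstore.similarity_search("vector search in Postgres", k=1)
-```
-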
-For a more detailed walkthrough of the Lantern Wrapper, see [this notebook](/docs/integrations/vectorstores/lantern)
diff --git a/langchain_md_files/integrations/providers/linkup.mdx b/langchain_md_files/integrations/providers/linkup.mdx
deleted file mode 100644
index ee7f595321746f64e64fb0e6d3a0c4368d407283..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/linkup.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-# Linkup
-
-> [Linkup](https://www.linkup.so/) provides an API to connect LLMs to the web and the Linkup Premium Partner sources.
-
-## Installation and Setup
-
-To use the Linkup provider, you first need a valid API key, which you can find by signing-up [here](https://app.linkup.so/sign-up).
-You will also need the `langchain-linkup` package, which you can install using pip:
-
-```bash
-pip install langchain-linkup
-```
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/linkup_search).
-
-```python
-from langchain_linkup import LinkupSearchRetriever
-
-retriever = LinkupSearchRetriever(
-    depth="deep",  # "standard" or "deep"
-    linkup_api_key=None,  # API key can be passed here or set as the LINKUP_API_KEY environment variable
-)
-```
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/linkup_search).
-
-```python
-from langchain_linkup import LinkupSearchTool
-
-tool = LinkupSearchTool(
-    depth="deep",  # "standard" or "deep"
-    output_type="searchResults",  # "searchResults", "sourcedAnswer" or "structured"
-    linkup_api_key=None,  # API key can be passed here or set as the LINKUP_API_KEY environment variable
-)
-```
diff --git a/langchain_md_files/integrations/providers/llama_index.mdx b/langchain_md_files/integrations/providers/llama_index.mdx
deleted file mode 100644
index 4ba7ac0eebae6fe06614d8d4b254eeedb036de00..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/llama_index.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# LlamaIndex
-
->[LlamaIndex](https://www.llamaindex.ai/) is the leading data framework for building LLM applications
-
-
-## Installation and Setup
-
-You need to install the `llama-index` python package.
-
-```bash
-pip install llama-index
-```
-
-See the [installation instructions](https://docs.llamaindex.ai/en/stable/getting_started/installation/).
-
-## Retrievers
-
-### LlamaIndexRetriever
-
->It is used for question-answering with sources over a LlamaIndex data structure.
-
-```python
-from langchain_community.retrievers.llama_index import LlamaIndexRetriever
-```
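-
-A minimal sketch, assuming `index` is a LlamaIndex index (e.g. a `VectorStoreIndex`) that you have already built:
-
-```python
-from langchain_community.retrievers.llama_index import LlamaIndexRetriever
-
-# `index` is assumed to be a LlamaIndex index built separately
-retriever = LlamaIndexRetriever(index=index)
-docs = retriever.invoke("What did the author do growing up?")
-```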
-
-### LlamaIndexGraphRetriever
-
->It is used for question-answering with sources over a LlamaIndex graph data structure.
-
-```python
-from langchain_community.retrievers.llama_index import LlamaIndexGraphRetriever
-```
diff --git a/langchain_md_files/integrations/providers/llamacpp.mdx b/langchain_md_files/integrations/providers/llamacpp.mdx
deleted file mode 100644
index de7d40a1c5ae46f6674bc9d9c8e0c4921a9481de..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/llamacpp.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
-# Llama.cpp
-
->The [llama.cpp python](https://github.com/abetlen/llama-cpp-python) library provides simple Python bindings for `@ggerganov`'s
->[llama.cpp](https://github.com/ggerganov/llama.cpp).
->
->This package provides:
->
-> - Low-level access to C API via ctypes interface.
-> - High-level Python API for text completion
->   - `OpenAI`-like API
->   - `LangChain` compatibility
->   - `LlamaIndex` compatibility
-> - OpenAI compatible web server
->   - Local Copilot replacement
->   - Function Calling support
->   - Vision API support
->   - Multiple Models
-
-## Installation and Setup
-
-- Install the Python package
-  ```bash
-  pip install llama-cpp-python
-  ```
-- Download one of the [supported models](https://github.com/ggerganov/llama.cpp#description) and convert them to the llama.cpp format per the [instructions](https://github.com/ggerganov/llama.cpp)
-
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/llamacpp).
-
-```python
-from langchain_community.chat_models import ChatLlamaCpp
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/llamacpp).
-
-```python
-from langchain_community.llms import LlamaCpp
-```
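-
-A minimal sketch of the LLM wrapper; the model path is a placeholder for a GGUF file you have downloaded or converted yourself:
-
-```python
-from langchain_community.llms import LlamaCpp
-
-llm = LlamaCpp(
-    model_path="./models/llama-2-7b.Q4_K_M.gguf",  # placeholder path
-    temperature=0.7,
-    max_tokens=256,
-)
-print(llm.invoke("Name three things llama.cpp is useful for."))
-```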
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/llamacpp).
-
-```python
-from langchain_community.embeddings import LlamaCppEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/llamaedge.mdx b/langchain_md_files/integrations/providers/llamaedge.mdx
deleted file mode 100644
index 64fbb50d389db60374ed8d7230abe104c1c41b7f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/llamaedge.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# LlamaEdge
-
->[LlamaEdge](https://llamaedge.com/docs/intro/) is the easiest & fastest way to run customized 
-> and fine-tuned LLMs locally or on the edge.
->
->* Lightweight inference apps. `LlamaEdge` is in MBs instead of GBs
->* Native and GPU accelerated performance
->* Supports many GPU and hardware accelerators
->* Supports many optimized inference libraries
->* Wide selection of AI / LLM models
-
-
-
-## Installation and Setup
-
-See the [installation instructions](https://llamaedge.com/docs/user-guide/quick-start-command).
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/llama_edge).
-
-```python
-from langchain_community.chat_models.llama_edge import LlamaEdgeChatService
-```
diff --git a/langchain_md_files/integrations/providers/llamafile.mdx b/langchain_md_files/integrations/providers/llamafile.mdx
deleted file mode 100644
index d0d0f268d100b6ae5ca4021e358e015fae6e278c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/llamafile.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# llamafile
-
->[llamafile](https://github.com/Mozilla-Ocho/llamafile) lets you distribute and run LLMs 
-> with a single file.
-
->`llamafile` makes open LLMs much more accessible to both developers and end users. 
-> `llamafile` is doing that by combining [llama.cpp](https://github.com/ggerganov/llama.cpp) with 
-> [Cosmopolitan Libc](https://github.com/jart/cosmopolitan) into one framework that collapses 
-> all the complexity of LLMs down to a single-file executable (called a "llamafile") 
-> that runs locally on most computers, with no installation.
-
-
-## Installation and Setup
-
-See the [installation instructions](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#quickstart).
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/llamafile).
-
-```python
-from langchain_community.llms.llamafile import Llamafile
-```
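-
-A minimal sketch, assuming a llamafile is already running in server mode on its default port (`http://localhost:8080`):
-
-```python
-from langchain_community.llms.llamafile import Llamafile
-
-# connects to a llamafile server assumed to be running at http://localhost:8080
-llm = Llamafile()
-print(llm.invoke("Tell me a joke."))
-```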
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/llamafile).
-
-```python
-from langchain_community.embeddings import LlamafileEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/llmonitor.mdx b/langchain_md_files/integrations/providers/llmonitor.mdx
deleted file mode 100644
index 90fb10a26401309bedb7a1faca0848741697c040..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/llmonitor.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# LLMonitor
-
->[LLMonitor](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs) is an open-source observability platform that provides cost and usage analytics, user tracking, tracing and evaluation tools.
-
-## Installation and Setup
-
-Create an account on [llmonitor.com](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs), then copy your new app's `tracking id`.
-
-Once you have it, set it as an environment variable by running:
-
-```bash
-export LLMONITOR_APP_ID="..."
-```
-
-
-## Callbacks
-
-See a [usage example](/docs/integrations/callbacks/llmonitor).
-
-```python
-from langchain.callbacks import LLMonitorCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/localai.mdx b/langchain_md_files/integrations/providers/localai.mdx
deleted file mode 100644
index 663eb592498cee43b2e83f0b40a36d89fd204967..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/localai.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# LocalAI
-
->[LocalAI](https://localai.io/) is the free, Open Source OpenAI alternative. 
-> `LocalAI` acts as a drop-in replacement REST API that’s compatible with OpenAI API 
-> specifications for local inferencing. It allows you to run LLMs, generate images, 
-> audio (and not only) locally or on-prem with consumer grade hardware, 
-> supporting multiple model families and architectures.
-
-:::caution
-For proper compatibility, please ensure you are using the `openai` SDK at version **0.x**.
-:::
-
-:::info
-`langchain-localai` is a 3rd party integration package for LocalAI. It provides a simple way to use LocalAI services in Langchain.
-The source code is available on [Github](https://github.com/mkhludnev/langchain-localai)
-:::
-
-## Installation and Setup
-
-We have to install several python packages: 
-
-```bash
-pip install tenacity openai
-```
-
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/localai).
-
-```python
-from langchain_localai import LocalAIEmbeddings
-```
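-
-A minimal sketch, assuming a LocalAI server is running locally and serving an embedding model; the base URL and model name below are placeholders:
-
-```python
-from langchain_localai import LocalAIEmbeddings
-
-embeddings = LocalAIEmbeddings(
-    openai_api_base="http://localhost:8080",  # placeholder LocalAI endpoint
-    model="text-embedding-ada-002",           # placeholder model name configured in LocalAI
-)
-print(embeddings.embed_query("Hello LocalAI"))
-```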
diff --git a/langchain_md_files/integrations/providers/log10.mdx b/langchain_md_files/integrations/providers/log10.mdx
deleted file mode 100644
index b4378506e7c0932296688ead50be23d414ea329b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/log10.mdx
+++ /dev/null
@@ -1,104 +0,0 @@
-# Log10
-
-This page covers how to use [Log10](https://log10.io) within LangChain.
-
-## What is Log10?
-
-Log10 is an [open-source](https://github.com/log10-io/log10) proxiless LLM data management and application development platform that lets you log, debug and tag your Langchain calls.
-
-## Quick start
-
-1. Create your free account at [log10.io](https://log10.io)
-2. Add your `LOG10_TOKEN` and `LOG10_ORG_ID` from the Settings and Organization tabs respectively as environment variables.
-3. Also add `LOG10_URL=https://log10.io` and your usual LLM API key (e.g. `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`) to your environment
-
-## How to enable Log10 data management for Langchain
-
-Integration with Log10 is a simple one-line `log10_callback` setup, as shown below:
-
-```python
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage
-
-from log10.langchain import Log10Callback
-from log10.llm import Log10Config
-
-log10_callback = Log10Callback(log10_config=Log10Config())
-
-messages = [
-    HumanMessage(content="You are a ping pong machine"),
-    HumanMessage(content="Ping?"),
-]
-
-llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback])
-```
-
-[Log10 + Langchain + Logs docs](https://github.com/log10-io/log10/blob/main/logging.md#langchain-logger)
-
-[More details + screenshots](https://log10.io/docs/observability/logs) including instructions for self-hosting logs
-
-## How to use tags with Log10
-
-```python
-from langchain_openai import OpenAI
-from langchain_community.chat_models import ChatAnthropic
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage
-
-from log10.langchain import Log10Callback
-from log10.llm import Log10Config
-
-log10_callback = Log10Callback(log10_config=Log10Config())
-
-messages = [
-    HumanMessage(content="You are a ping pong machine"),
-    HumanMessage(content="Ping?"),
-]
-
-llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"])
-completion = llm.predict_messages(messages, tags=["foobar"])
-print(completion)
-
-llm = ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.7, tags=["baz"])
-llm.predict_messages(messages)
-print(completion)
-
-llm = OpenAI(model_name="gpt-3.5-turbo-instruct", callbacks=[log10_callback], temperature=0.5)
-completion = llm.predict("You are a ping pong machine.\nPing?\n")
-print(completion)
-```
-
-You can also intermix direct OpenAI calls and Langchain LLM calls:
-
-```python
-import os
-from log10.load import log10, log10_session
-import openai
-from langchain_openai import OpenAI
-
-log10(openai)
-
-with log10_session(tags=["foo", "bar"]):
-    # Log a direct OpenAI call
-    response = openai.Completion.create(
-        model="text-ada-001",
-        prompt="Where is the Eiffel Tower?",
-        temperature=0,
-        max_tokens=1024,
-        top_p=1,
-        frequency_penalty=0,
-        presence_penalty=0,
-    )
-    print(response)
-
-    # Log a call via Langchain
-    llm = OpenAI(model_name="text-ada-001", temperature=0.5)
-    response = llm.predict("You are a ping pong machine.\nPing?\n")
-    print(response)
-```
-
-## How to debug Langchain calls
-
-[Example of debugging](https://log10.io/docs/observability/prompt_chain_debugging)
-
-[More Langchain examples](https://github.com/log10-io/log10/tree/main/examples#langchain)
diff --git a/langchain_md_files/integrations/providers/maritalk.mdx b/langchain_md_files/integrations/providers/maritalk.mdx
deleted file mode 100644
index 6b0dcda545690c7a2e1e43007f3634a05bc0103c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/maritalk.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# MariTalk
-
->[MariTalk](https://www.maritaca.ai/en) is an LLM-based chatbot trained to meet the needs of Brazil.
-
-## Installation and Setup
-
-You have to get the MariTalk API key.
-
-You also need to install the `httpx` Python package.
-
-```bash
-pip install httpx
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/maritalk).
-
-```python
-from langchain_community.chat_models import ChatMaritalk
-```
diff --git a/langchain_md_files/integrations/providers/mediawikidump.mdx b/langchain_md_files/integrations/providers/mediawikidump.mdx
deleted file mode 100644
index 52f5fde1e71283eb4792494f7df1268475d0d001..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mediawikidump.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# MediaWikiDump
-
->[MediaWiki XML Dumps](https://www.mediawiki.org/wiki/Manual:Importing_XML_dumps) contain the content of a wiki 
-> (wiki pages with all their revisions), without the site-related data. An XML dump does not create a full backup 
-> of the wiki database; the dump does not contain user accounts, images, edit logs, etc.
-
-
-## Installation and Setup
-
-We need to install several python packages.
-
-The `mediawiki-utilities` package supports XML schema 0.11 only in an unmerged branch.
-```bash
-pip install -qU git+https://github.com/mediawiki-utilities/python-mwtypes@updates_schema_0.11
-```
-
-The `mediawiki-utilities` `mwxml` package has a bug; a fix PR is pending.
-
-```bash
-pip install -qU git+https://github.com/gdedrouas/python-mwxml@xml_format_0.11
-pip install -qU mwparserfromhell
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/mediawikidump).
-
-
-```python
-from langchain_community.document_loaders import MWDumpLoader
-```
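-
-A minimal loading sketch (the dump path is a placeholder; adjust `encoding` and other loader options to match your wiki export):
-
-```python
-from langchain_community.document_loaders import MWDumpLoader
-
-# Placeholder path to a MediaWiki XML dump
-loader = MWDumpLoader(file_path="example_wiki_dump.xml", encoding="utf8")
-documents = loader.load()
-print(f"Loaded {len(documents)} wiki pages")
-```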
diff --git a/langchain_md_files/integrations/providers/meilisearch.mdx b/langchain_md_files/integrations/providers/meilisearch.mdx
deleted file mode 100644
index 31cc5d4c22ad14893d56a727765e936ce09cda42..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/meilisearch.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# Meilisearch
-
-> [Meilisearch](https://meilisearch.com) is an open-source, lightning-fast, and hyper-relevant
-> search engine.
-> It comes with great defaults to help developers build snappy search experiences. 
->
-> You can [self-host Meilisearch](https://www.meilisearch.com/docs/learn/getting_started/installation#local-installation) 
-> or run on [Meilisearch Cloud](https://www.meilisearch.com/pricing).
->
->`Meilisearch v1.3` supports vector search.
-
-## Installation and Setup
-
-See a [usage example](/docs/integrations/vectorstores/meilisearch) for detailed configuration instructions.
-
-
-We need to install the `meilisearch` Python package.
-
-```bash
-pip install meilisearch
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/meilisearch).
-
-```python
-from langchain_community.vectorstores import Meilisearch
-```
-
diff --git a/langchain_md_files/integrations/providers/memcached.mdx b/langchain_md_files/integrations/providers/memcached.mdx
deleted file mode 100644
index f7719deda4031d69311600a41c5519acd21c587a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/memcached.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# Memcached
-
-> [Memcached](https://www.memcached.org/) is a free & open source, high-performance, distributed memory object caching system,
-> generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load.
-
-This page covers how to use Memcached with LangChain, using [pymemcache](https://github.com/pinterest/pymemcache) as
-a client to connect to an already running Memcached instance.
-
-## Installation and Setup
-```bash
-pip install pymemcache
-```
-
-## LLM Cache
-
-To integrate a Memcached Cache into your application:
-```python
-from langchain.globals import set_llm_cache
-from langchain_openai import OpenAI
-
-from langchain_community.cache import MemcachedCache
-from pymemcache.client.base import Client
-
-llm = OpenAI(model="gpt-3.5-turbo-instruct", n=2, best_of=2)
-set_llm_cache(MemcachedCache(Client('localhost')))
-
-# The first time, it is not yet in cache, so it should take longer
-llm.invoke("Which city is the most crowded city in the USA?")
-
-# The second time it is, so it goes faster
-llm.invoke("Which city is the most crowded city in the USA?")
-```
-
-Learn more in the [example notebook](/docs/integrations/llm_caching#memcached-cache)
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/metal.mdx b/langchain_md_files/integrations/providers/metal.mdx
deleted file mode 100644
index 455830b2db775d1d332940c06249987a0aad8f4c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/metal.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
-# Metal
-
-This page covers how to use [Metal](https://getmetal.io) within LangChain.
-
-## What is Metal?
-
-Metal is a managed retrieval & memory platform built for production. Easily index your data into `Metal` and run semantic search and retrieval on it.
-
-![Screenshot of the Metal dashboard showing the Browse Index feature with sample data.](/img/MetalDash.png "Metal Dashboard Interface")
-
-## Quick start
-
-Get started by [creating a Metal account](https://app.getmetal.io/signup).
-
-Then, you can easily take advantage of the `MetalRetriever` class to start retrieving your data for semantic search, prompting context, etc. This class takes a `Metal` instance and a dictionary of parameters to pass to the Metal API.
-
-```python
-from langchain.retrievers import MetalRetriever
-from metal_sdk.metal import Metal
-
-
-metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")
-retriever = MetalRetriever(metal, params={"limit": 2})
-
-docs = retriever.invoke("search term")
-```
diff --git a/langchain_md_files/integrations/providers/microsoft.mdx b/langchain_md_files/integrations/providers/microsoft.mdx
deleted file mode 100644
index 518d4869d47f2a6d8f7da8bfa716c81f4684e6dc..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/microsoft.mdx
+++ /dev/null
@@ -1,650 +0,0 @@
----
-keywords: [azure]
----
-
-# Microsoft
-
-All functionality related to `Microsoft Azure` and other `Microsoft` products.
-
-## Chat Models
-
-### Azure OpenAI
-
->[Microsoft Azure](https://en.wikipedia.org/wiki/Microsoft_Azure), often referred to as `Azure`, is a cloud computing platform run by `Microsoft`, which offers access, management, and development of applications and services through global data centers. It provides a range of capabilities, including software as a service (SaaS), platform as a service (PaaS), and infrastructure as a service (IaaS). `Microsoft Azure` supports many programming languages, tools, and frameworks, including Microsoft-specific and third-party software and systems.
-
->[Azure OpenAI](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) is an `Azure` service with powerful language models from `OpenAI` including the `GPT-3`, `Codex` and `Embeddings model` series for content generation, summarization, semantic search, and natural language to code translation.
-
-```bash
-pip install langchain-openai
-```
-
-Set the environment variables to get access to the `Azure OpenAI` service.
-
-```python
-import os
-
-os.environ["AZURE_OPENAI_ENDPOINT"] = "https://<your-endpoint.openai.azure.com/"
-os.environ["AZURE_OPENAI_API_KEY"] = "your AzureOpenAI key"
-```
-
-See a [usage example](/docs/integrations/chat/azure_chat_openai)
-
-
-```python
-from langchain_openai import AzureChatOpenAI
-```
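-
-A rough sketch of instantiating the chat model (the deployment name and API version below are placeholders, assuming the environment variables above are set):
-
-```python
-from langchain_openai import AzureChatOpenAI
-
-# Placeholder deployment name and API version for your own Azure OpenAI deployment
-llm = AzureChatOpenAI(
-    azure_deployment="<your-chat-deployment-name>",
-    api_version="2024-06-01",
-)
-llm.invoke("Hello from Azure OpenAI!")
-```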
-
-### Azure ML Chat Online Endpoint
-
-See the documentation [here](/docs/integrations/chat/azureml_chat_endpoint) for accessing chat
-models hosted with [Azure Machine Learning](https://azure.microsoft.com/en-us/products/machine-learning/).
-
-
-## LLMs
-
-### Azure ML
-
-See a [usage example](/docs/integrations/llms/azure_ml).
-
-```python
-from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
-```
-
-### Azure OpenAI
-
-See a [usage example](/docs/integrations/llms/azure_openai).
-
-```python
-from langchain_openai import AzureOpenAI
-```
-
-## Embedding Models
-### Azure OpenAI
-
-See a [usage example](/docs/integrations/text_embedding/azureopenai)
-
-```python
-from langchain_openai import AzureOpenAIEmbeddings
-```
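-
-A minimal sketch (the deployment name is a placeholder for your own embeddings deployment):
-
-```python
-from langchain_openai import AzureOpenAIEmbeddings
-
-# Assumes AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY are set
-embeddings = AzureOpenAIEmbeddings(azure_deployment="<your-embeddings-deployment-name>")
-vector = embeddings.embed_query("hello world")
-```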
-
-## Document loaders
-
-### Azure AI Data
-
->[Azure AI Studio](https://ai.azure.com/) provides the capability to upload data assets 
-> to cloud storage and register existing data assets from the following sources:
->
->- `Microsoft OneLake`
->- `Azure Blob Storage`
->- `Azure Data Lake gen 2`
-
-First, you need to install several python packages.
-
-```bash
-pip install azureml-fsspec azure-ai-generative
-```
-
-See a [usage example](/docs/integrations/document_loaders/azure_ai_data).
-
-```python
-from langchain.document_loaders import AzureAIDataLoader
-```
-
-
-### Azure AI Document Intelligence
-
->[Azure AI Document Intelligence](https://aka.ms/doc-intelligence) (formerly known
-> as `Azure Form Recognizer`) is a machine-learning-based
-> service that extracts texts (including handwriting), tables, document structures, 
-> and key-value-pairs
-> from digital or scanned PDFs, images, Office and HTML files.
->
-> Document Intelligence supports `PDF`, `JPEG/JPG`, `PNG`, `BMP`, `TIFF`, `HEIF`, `DOCX`, `XLSX`, `PPTX` and `HTML`.
-
-First, you need to install a python package.
-
-```bash
-pip install azure-ai-documentintelligence
-```
-
-See a [usage example](/docs/integrations/document_loaders/azure_document_intelligence).
-
-```python
-from langchain.document_loaders import AzureAIDocumentIntelligenceLoader
-```
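-
-A minimal sketch of the loader (endpoint, key, and file path are placeholders):
-
-```python
-from langchain.document_loaders import AzureAIDocumentIntelligenceLoader
-
-# Placeholder endpoint, key, and file path; "prebuilt-layout" is the general layout model
-loader = AzureAIDocumentIntelligenceLoader(
-    api_endpoint="<your-document-intelligence-endpoint>",
-    api_key="<your-api-key>",
-    file_path="example.pdf",
-    api_model="prebuilt-layout",
-)
-documents = loader.load()
-```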
-
-
-### Azure Blob Storage
-
->[Azure Blob Storage](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction) is Microsoft's object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data. Unstructured data is data that doesn't adhere to a particular data model or definition, such as text or binary data.
-
->[Azure Files](https://learn.microsoft.com/en-us/azure/storage/files/storage-files-introduction) offers fully managed
-> file shares in the cloud that are accessible via the industry standard Server Message Block (`SMB`) protocol,
-> Network File System (`NFS`) protocol, and `Azure Files REST API`. `Azure Files` is based on `Azure Blob Storage`.
-
-`Azure Blob Storage` is designed for:
-- Serving images or documents directly to a browser.
-- Storing files for distributed access.
-- Streaming video and audio.
-- Writing to log files.
-- Storing data for backup and restore, disaster recovery, and archiving.
-- Storing data for analysis by an on-premises or Azure-hosted service.
-
-```bash
-pip install azure-storage-blob
-```
-
-See a [usage example for the Azure Blob Storage](/docs/integrations/document_loaders/azure_blob_storage_container).
-
-```python
-from langchain_community.document_loaders import AzureBlobStorageContainerLoader
-```
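-
-A rough usage sketch (connection string and container name are placeholders):
-
-```python
-from langchain_community.document_loaders import AzureBlobStorageContainerLoader
-
-# Placeholder connection string and container name
-loader = AzureBlobStorageContainerLoader(
-    conn_str="<your-storage-connection-string>",
-    container="<your-container-name>",
-)
-documents = loader.load()
-```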
-
-See a [usage example for the Azure Files](/docs/integrations/document_loaders/azure_blob_storage_file).
-
-```python
-from langchain_community.document_loaders import AzureBlobStorageFileLoader
-```
-
-
-### Microsoft OneDrive
-
->[Microsoft OneDrive](https://en.wikipedia.org/wiki/OneDrive) (formerly `SkyDrive`) is a file-hosting service operated by Microsoft.
-
-First, you need to install a python package.
-
-```bash
-pip install o365
-```
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_onedrive).
-
-```python
-from langchain_community.document_loaders import OneDriveLoader
-```
-
-### Microsoft OneDrive File
-
->[Microsoft OneDrive](https://en.wikipedia.org/wiki/OneDrive) (formerly `SkyDrive`) is a file-hosting service operated by Microsoft.
-
-First, you need to install a python package.
-
-```bash
-pip install o365
-```
-
-```python
-from langchain_community.document_loaders import OneDriveFileLoader
-```
-
-
-### Microsoft Word
-
->[Microsoft Word](https://www.microsoft.com/en-us/microsoft-365/word) is a word processor developed by Microsoft.
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_word).
-
-```python
-from langchain_community.document_loaders import UnstructuredWordDocumentLoader
-```
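-
-A minimal sketch ("example.docx" is a placeholder path; requires the `unstructured` package):
-
-```python
-from langchain_community.document_loaders import UnstructuredWordDocumentLoader
-
-# Placeholder .docx path
-loader = UnstructuredWordDocumentLoader("example.docx")
-documents = loader.load()
-```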
-
-
-### Microsoft Excel
-
->[Microsoft Excel](https://en.wikipedia.org/wiki/Microsoft_Excel) is a spreadsheet editor developed by 
-> Microsoft for Windows, macOS, Android, iOS and iPadOS. 
-> It features calculation or computation capabilities, graphing tools, pivot tables, and a macro programming 
-> language called Visual Basic for Applications (VBA). Excel forms part of the Microsoft 365 suite of software.
-
-The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. 
-The page content will be the raw text of the Excel file. If you use the loader in `"elements"` mode, an HTML 
-representation of the Excel file will be available in the document metadata under the `text_as_html` key.
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_excel).
-
-```python
-from langchain_community.document_loaders import UnstructuredExcelLoader
-```
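-
-A minimal sketch ("example.xlsx" is a placeholder path):
-
-```python
-from langchain_community.document_loaders import UnstructuredExcelLoader
-
-# In "elements" mode an HTML rendering of each sheet is available
-# under the `text_as_html` metadata key
-loader = UnstructuredExcelLoader("example.xlsx", mode="elements")
-documents = loader.load()
-```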
-
-
-### Microsoft SharePoint
-
->[Microsoft SharePoint](https://en.wikipedia.org/wiki/SharePoint) is a website-based collaboration system, developed by Microsoft, 
-> that uses workflow applications, “list” databases, and other web parts and security features to 
-> empower business teams to work together.
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_sharepoint).
-
-```python
-from langchain_community.document_loaders.sharepoint import SharePointLoader
-```
-
-
-### Microsoft PowerPoint
-
->[Microsoft PowerPoint](https://en.wikipedia.org/wiki/Microsoft_PowerPoint) is a presentation program by Microsoft.
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_powerpoint).
-
-```python
-from langchain_community.document_loaders import UnstructuredPowerPointLoader
-```
-
-### Microsoft OneNote
-
-First, let's install dependencies:
-
-```bash
-pip install bs4 msal
-```
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_onenote).
-
-```python
-from langchain_community.document_loaders.onenote import OneNoteLoader
-```
-
-### Playwright URL Loader
-
->[Playwright](https://github.com/microsoft/playwright) is an open-source automation tool 
-> developed by `Microsoft` that allows you to programmatically control and automate 
-> web browsers. It is designed for end-to-end testing, scraping, and automating 
-> tasks across various web browsers such as `Chromium`, `Firefox`, and `WebKit`.
-
-
-First, let's install dependencies:
-
-```bash
-pip install playwright unstructured
-```
-
-See a [usage example](/docs/integrations/document_loaders/url/#playwright-url-loader).
-
-```python
-from langchain_community.document_loaders import PlaywrightURLLoader
-```
-
-## Vector Stores
-
-### Azure Cosmos DB
-AI agents can rely on Azure Cosmos DB as a unified [memory system](https://learn.microsoft.com/en-us/azure/cosmos-db/ai-agents#memory-can-make-or-break-agents) solution, enjoying speed, scale, and simplicity. This service successfully [enabled OpenAI's ChatGPT service](https://www.youtube.com/watch?v=6IIUtEFKJec&t) to scale dynamically with high reliability and low maintenance. Powered by an atom-record-sequence engine, it is the world's first globally distributed [NoSQL](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-nosql), [relational](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-relational), and [vector database](https://learn.microsoft.com/en-us/azure/cosmos-db/vector-database) service that offers a serverless mode. 
-
-Below are two available Azure Cosmos DB APIs that can provide vector store functionalities.
-
-#### Azure Cosmos DB for MongoDB (vCore)
-
->[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/) makes it easy to create a database with full native MongoDB support.
-> You can apply your MongoDB experience and continue to use your favorite MongoDB drivers, SDKs, and tools by pointing your application to the API for MongoDB vCore account's connection string.
-> Use vector search in Azure Cosmos DB for MongoDB vCore to seamlessly integrate your AI-based applications with your data that's stored in Azure Cosmos DB.
-
-##### Installation and Setup
-
-See [detail configuration instructions](/docs/integrations/vectorstores/azure_cosmos_db).
-
-We need to install `pymongo` python package.
-
-```bash
-pip install pymongo
-```
-
-##### Deploy Azure Cosmos DB on Microsoft Azure
-
-Azure Cosmos DB for MongoDB vCore provides developers with a fully managed MongoDB-compatible database service for building modern applications with a familiar architecture.
-
-With Cosmos DB for MongoDB vCore, developers can enjoy the benefits of native Azure integrations, low total cost of ownership (TCO), and the familiar vCore architecture when migrating existing applications or building new ones.
-
-[Sign Up](https://azure.microsoft.com/en-us/free/) for free to get started today.
-
-See a [usage example](/docs/integrations/vectorstores/azure_cosmos_db).
-
-```python
-from langchain_community.vectorstores import AzureCosmosDBVectorSearch
-```
-
-#### Azure Cosmos DB NoSQL
-
->[Azure Cosmos DB for NoSQL](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/vector-search) now offers vector indexing and search in preview.
-This feature is designed to handle high-dimensional vectors, enabling efficient and accurate vector search at any scale. You can now store vectors
-directly in the documents alongside your data. This means that each document in your database can contain not only traditional schema-free data,
-but also high-dimensional vectors as other properties of the documents. This colocation of data and vectors allows for efficient indexing and searching,
-as the vectors are stored in the same logical unit as the data they represent. This simplifies data management and AI application
-architectures, and it improves the efficiency of vector-based operations.
-
-##### Installation and Setup
-
-See [detail configuration instructions](/docs/integrations/vectorstores/azure_cosmos_db_no_sql).
-
-We need to install `azure-cosmos` python package.
-
-```bash
-pip install azure-cosmos
-```
-
-##### Deploy Azure Cosmos DB on Microsoft Azure
-
-Azure Cosmos DB offers a solution for modern apps and intelligent workloads by being very responsive with dynamic and elastic autoscale. It is available
-in every Azure region and can automatically replicate data closer to users. It has SLA guaranteed low-latency and high availability.
-
-[Sign Up](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-python?pivots=devcontainer-codespace) for free to get started today.
-
-See a [usage example](/docs/integrations/vectorstores/azure_cosmos_db_no_sql).
-
-```python
-from langchain_community.vectorstores import AzureCosmosDBNoSQLVectorSearch
-```
-
-### Azure Database for PostgreSQL
-
->[Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) is a relational database service based on the open-source Postgres database engine. It's a fully managed database-as-a-service that can handle mission-critical workloads with predictable performance, security, high availability, and dynamic scalability.
-
-See [set up instructions](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/quickstart-create-server-portal) for Azure Database for PostgreSQL. 
-
-See a [usage example](/docs/integrations/memory/postgres_chat_message_history/). Simply use the [connection string](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/connect-python?tabs=cmd%2Cpassword#add-authentication-code) from your Azure Portal. 
-
-Since Azure Database for PostgreSQL is open-source Postgres, you can use the [LangChain's Postgres support](/docs/integrations/vectorstores/pgvector/) to connect to Azure Database for PostgreSQL.
-
-### Azure SQL Database
-
->[Azure SQL Database](https://learn.microsoft.com/azure/azure-sql/database/sql-database-paas-overview?view=azuresql) is a robust service that combines scalability, security, and high availability, providing all the benefits of a modern database solution.  It also provides a dedicated Vector data type & built-in functions that simplifies the storage and querying of vector embeddings directly within a relational database. This eliminates the need for separate vector databases and related integrations, increasing the security of your solutions while reducing the overall complexity.
-
-By leveraging your current SQL Server databases for vector search, you can enhance data capabilities while minimizing expenses and avoiding the challenges of transitioning to new systems.
-
-##### Installation and Setup
-
-See [detail configuration instructions](/docs/integrations/vectorstores/sqlserver).
-
-We need to install the `langchain-sqlserver` python package.
-
-```bash
-pip install langchain-sqlserver==0.1.1
-```
-
-##### Deploy Azure SQL DB on Microsoft Azure
-
-[Sign Up](https://learn.microsoft.com/azure/azure-sql/database/free-offer?view=azuresql) for free to get started today.
-
-See a [usage example](/docs/integrations/vectorstores/sqlserver).
-
-```python
-from langchain_sqlserver import SQLServer_VectorStore
-```
-
-### Azure AI Search
-
-[Azure AI Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) is a cloud search service
-that gives developers infrastructure, APIs, and tools for information retrieval of vector, keyword, and hybrid
-queries at scale. See [here](/docs/integrations/vectorstores/azuresearch) for usage examples.
-
-```python
-from langchain_community.vectorstores.azuresearch import AzureSearch
-```
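-
-A rough sketch of creating the vector store (endpoint, admin key, index name, and the embeddings deployment are placeholders; the `azure-search-documents` package is also assumed to be installed):
-
-```python
-from langchain_community.vectorstores.azuresearch import AzureSearch
-from langchain_openai import AzureOpenAIEmbeddings
-
-# Placeholder embeddings deployment and search credentials
-embeddings = AzureOpenAIEmbeddings(azure_deployment="<your-embeddings-deployment>")
-vector_store = AzureSearch(
-    azure_search_endpoint="<your-search-endpoint>",
-    azure_search_key="<your-search-admin-key>",
-    index_name="langchain-index",
-    embedding_function=embeddings.embed_query,
-)
-vector_store.add_texts(["Azure AI Search keeps the embeddings close to the data."])
-```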
-
-## Retrievers
-
-### Azure AI Search
-
->[Azure AI Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search` or `Azure Cognitive Search` ) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.
-
->Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you'll work with the following capabilities:
->- A search engine for full text search over a search index containing user-owned content
->- Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation
->- Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more
->- Programmability through REST APIs and client libraries in Azure SDKs
->- Azure integration at the data layer, machine learning layer, and AI (AI Services)
-
-See [set up instructions](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal).
-
-See a [usage example](/docs/integrations/retrievers/azure_ai_search).
-
-```python
-from langchain_community.retrievers import AzureAISearchRetriever
-```
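-
-A minimal sketch, assuming the service name and API key are supplied via environment variables (all values are placeholders):
-
-```python
-import os
-
-from langchain_community.retrievers import AzureAISearchRetriever
-
-# Placeholder credentials for an existing Azure AI Search service
-os.environ["AZURE_AI_SEARCH_SERVICE_NAME"] = "<your-search-service-name>"
-os.environ["AZURE_AI_SEARCH_API_KEY"] = "<your-search-api-key>"
-
-retriever = AzureAISearchRetriever(index_name="<your-index-name>", content_key="content", top_k=3)
-docs = retriever.invoke("What is Azure AI Search?")
-```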
-
-## Vector Store
-### Azure Database for PostgreSQL
->[Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) is a relational database service based on the open-source Postgres database engine. It's a fully managed database-as-a-service that can handle mission-critical workloads with predictable performance, security, high availability, and dynamic scalability.
-
-See [set up instructions](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/quickstart-create-server-portal) for Azure Database for PostgreSQL. 
-
-You need to [enable pgvector extension](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/how-to-use-pgvector) in your database to use Postgres as a vector store. Once you have the extension enabled, you can use the [PGVector in LangChain](/docs/integrations/vectorstores/pgvector/) to connect to Azure Database for PostgreSQL. 
-
-See a [usage example](/docs/integrations/vectorstores/pgvector/). Simply use the [connection string](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/connect-python?tabs=cmd%2Cpassword#add-authentication-code) from your Azure Portal. 
-
-
-## Tools
-
-### Azure Container Apps dynamic sessions
-
-We need to get the `POOL_MANAGEMENT_ENDPOINT` environment variable from the Azure Container Apps service.
-See the instructions [here](/docs/integrations/tools/azure_dynamic_sessions/#setup).
-
-We need to install a python package.
-
-```bash
-pip install langchain-azure-dynamic-sessions
-```
-
-See a [usage example](/docs/integrations/tools/azure_dynamic_sessions).
-
-```python
-from langchain_azure_dynamic_sessions import SessionsPythonREPLTool
-```
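-
-A minimal sketch, assuming `POOL_MANAGEMENT_ENDPOINT` is set as described above:
-
-```python
-import os
-
-from langchain_azure_dynamic_sessions import SessionsPythonREPLTool
-
-# The endpoint comes from your Azure Container Apps session pool
-tool = SessionsPythonREPLTool(
-    pool_management_endpoint=os.environ["POOL_MANAGEMENT_ENDPOINT"],
-)
-print(tool.invoke("print(1 + 1)"))
-```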
-
-### Bing Search
-
-Follow the documentation [here](/docs/integrations/tools/bing_search) for detailed explanations and instructions for this tool.
-
-The environment variables `BING_SUBSCRIPTION_KEY` and `BING_SEARCH_URL` are required; get them from your Bing Search resource.
-
-```python
-from langchain_community.tools.bing_search import BingSearchResults
-from langchain_community.utilities import BingSearchAPIWrapper
-
-api_wrapper = BingSearchAPIWrapper()
-tool = BingSearchResults(api_wrapper=api_wrapper)
-```
-
-## Toolkits
-
-### Azure AI Services
-
-We need to install several python packages.
-
-```bash
-pip install azure-ai-formrecognizer azure-cognitiveservices-speech azure-ai-vision-imageanalysis
-```
-
-See a [usage example](/docs/integrations/tools/azure_ai_services).
-
-```python
-from langchain_community.agent_toolkits import azure_ai_services
-```
-
-#### Azure AI Services individual tools
-
-The `azure_ai_services` toolkit includes the following tools:
-
-- Image Analysis: [AzureAiServicesImageAnalysisTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.azure_ai_services.image_analysis.AzureAiServicesImageAnalysisTool.html)
-- Document Intelligence: [AzureAiServicesDocumentIntelligenceTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.azure_ai_services.document_intelligence.AzureAiServicesDocumentIntelligenceTool.html)
-- Speech to Text: [AzureAiServicesSpeechToTextTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.azure_ai_services.speech_to_text.AzureAiServicesSpeechToTextTool.html)
-- Text to Speech: [AzureAiServicesTextToSpeechTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.azure_ai_services.text_to_speech.AzureAiServicesTextToSpeechTool.html)
-- Text Analytics for Health: [AzureAiServicesTextAnalyticsForHealthTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.azure_ai_services.text_analytics_for_health.AzureAiServicesTextAnalyticsForHealthTool.html)
-
-### Azure Cognitive Services
-
-We need to install several python packages.
-
-```bash
-pip install azure-ai-formrecognizer azure-cognitiveservices-speech azure-ai-vision-imageanalysis
-```
-
-See a [usage example](/docs/integrations/tools/azure_cognitive_services).
-
-```python
-from langchain_community.agent_toolkits import AzureCognitiveServicesToolkit
-```
-
-#### Azure Cognitive Services individual tools
-
-The `AzureCognitiveServicesToolkit` includes the following tools, which query `Azure Cognitive Services`:
-- `AzureCogsFormRecognizerTool`: Form Recognizer API
-- `AzureCogsImageAnalysisTool`: Image Analysis API
-- `AzureCogsSpeech2TextTool`: Speech2Text API
-- `AzureCogsText2SpeechTool`: Text2Speech API
-- `AzureCogsTextAnalyticsHealthTool`: Text Analytics for Health API
-
-```python
-from langchain_community.tools.azure_cognitive_services import (
-    AzureCogsFormRecognizerTool,
-    AzureCogsImageAnalysisTool,
-    AzureCogsSpeech2TextTool,
-    AzureCogsText2SpeechTool,
-    AzureCogsTextAnalyticsHealthTool,
-)
-```
-
-### Microsoft Office 365 email and calendar
-
-We need to install `O365` python package.
-
-```bash
-pip install O365
-```
-
-
-See a [usage example](/docs/integrations/tools/office365).
-
-```python
-from langchain_community.agent_toolkits import O365Toolkit
-```
-
-#### Office 365 individual tools
-
-You can use individual tools from the Office 365 Toolkit:
-- `O365CreateDraftMessage`: creating a draft email in Office 365
-- `O365SearchEmails`: searching email messages in Office 365
-- `O365SearchEvents`: searching calendar events in Office 365
-- `O365SendEvent`: sending calendar events in Office 365
-- `O365SendMessage`: sending an email in Office 365
-
-```python
-from langchain_community.tools.office365 import O365CreateDraftMessage
-from langchain_community.tools.office365 import O365SearchEmails
-from langchain_community.tools.office365 import O365SearchEvents
-from langchain_community.tools.office365 import O365SendEvent
-from langchain_community.tools.office365 import O365SendMessage
-```
-
-### Microsoft Azure PowerBI
-
-We need to install `azure-identity` python package.
-
-```bash
-pip install azure-identity
-```
-
-See a [usage example](/docs/integrations/tools/powerbi).
-
-```python
-from langchain_community.agent_toolkits import PowerBIToolkit
-from langchain_community.utilities.powerbi import PowerBIDataset
-```
-
-#### PowerBI individual tools
-
-You can use individual tools from the Azure PowerBI Toolkit:
-- `InfoPowerBITool`: getting metadata about a PowerBI Dataset
-- `ListPowerBITool`: getting tables names
-- `QueryPowerBITool`: querying a PowerBI Dataset
-
-```python
-from langchain_community.tools.powerbi.tool import InfoPowerBITool
-from langchain_community.tools.powerbi.tool import ListPowerBITool
-from langchain_community.tools.powerbi.tool import QueryPowerBITool
-```
-
-
-### PlayWright Browser Toolkit
-
->[Playwright](https://github.com/microsoft/playwright) is an open-source automation tool 
-> developed by `Microsoft` that allows you to programmatically control and automate 
-> web browsers. It is designed for end-to-end testing, scraping, and automating 
-> tasks across various web browsers such as `Chromium`, `Firefox`, and `WebKit`.
-
-We need to install several python packages.
-
-```bash
-pip install playwright lxml
-```
-
-See a [usage example](/docs/integrations/tools/playwright).
-
-```python
-from langchain_community.agent_toolkits import PlayWrightBrowserToolkit
-```
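-
-A rough sketch of building the toolkit from a browser instance (assumes browser binaries are installed, e.g. via `playwright install`):
-
-```python
-from langchain_community.agent_toolkits import PlayWrightBrowserToolkit
-from langchain_community.tools.playwright.utils import create_sync_playwright_browser
-
-# Create a synchronous Playwright browser and hand it to the toolkit
-sync_browser = create_sync_playwright_browser()
-toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=sync_browser)
-tools = toolkit.get_tools()
-```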
-
-#### PlayWright Browser individual tools
-
-You can use individual tools from the PlayWright Browser Toolkit.
-
-```python
-from langchain_community.tools.playwright import ClickTool
-from langchain_community.tools.playwright import CurrentWebPageTool
-from langchain_community.tools.playwright import ExtractHyperlinksTool
-from langchain_community.tools.playwright import ExtractTextTool
-from langchain_community.tools.playwright import GetElementsTool
-from langchain_community.tools.playwright import NavigateTool
-from langchain_community.tools.playwright import NavigateBackTool
-```
-
-## Graphs
-
-### Azure Cosmos DB for Apache Gremlin
-
-We need to install a python package.
-
-```bash
-pip install gremlinpython
-```
-
-See a [usage example](/docs/integrations/graphs/azure_cosmosdb_gremlin).
-
-```python
-from langchain_community.graphs import GremlinGraph
-from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
-```
-
-## Utilities
-
-### Bing Search API
-
->[Microsoft Bing](https://www.bing.com/), commonly referred to as `Bing` or `Bing Search`, 
-> is a web search engine owned and operated by `Microsoft`.
-
-See a [usage example](/docs/integrations/tools/bing_search).
-
-```python
-from langchain_community.utilities import BingSearchAPIWrapper
-```
-
-## More
-
-### Microsoft Presidio
-
->[Presidio](https://microsoft.github.io/presidio/) (from Latin praesidium, ‘protection, garrison’) 
-> helps to ensure sensitive data is properly managed and governed. It provides fast identification and 
-> anonymization modules for private entities in text and images such as credit card numbers, names, 
-> locations, social security numbers, bitcoin wallets, US phone numbers, financial data and more.
-
-First, you need to install several python packages and download a `SpaCy` model.
-
-```bash
-pip install langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
-python -m spacy download en_core_web_lg
-```
-
-See [usage examples](https://python.langchain.com/v0.1/docs/guides/productionization/safety/presidio_data_anonymization).
-
-```python
-from langchain_experimental.data_anonymizer import PresidioAnonymizer, PresidioReversibleAnonymizer
-```
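-
-A minimal anonymization sketch (the example text is made up):
-
-```python
-from langchain_experimental.data_anonymizer import PresidioAnonymizer
-
-anonymizer = PresidioAnonymizer()
-# Detected PII entities (names, phone numbers, ...) are replaced with fake values
-print(anonymizer.anonymize("My name is John Doe, call me at 313-555-1234"))
-```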
diff --git a/langchain_md_files/integrations/providers/milvus.mdx b/langchain_md_files/integrations/providers/milvus.mdx
deleted file mode 100644
index d73590fdff0c076a4e4afb837f8604f8c8c69ac3..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/milvus.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Milvus
-
->[Milvus](https://milvus.io/docs/overview.md) is a database that stores, indexes, and manages
-> massive embedding vectors generated by deep neural networks and other machine learning (ML) models.
-
-
-## Installation and Setup
-
-Install the Python SDK:
-
-```bash
-pip install langchain-milvus
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/milvus).
-
-To import this vectorstore:
-```python
-from langchain_milvus import Milvus
-```
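-
-A rough sketch of creating the vector store (the local Milvus Lite file URI and the embeddings provider are placeholders; a remote Milvus server URI works as well):
-
-```python
-from langchain_milvus import Milvus
-from langchain_openai import OpenAIEmbeddings
-
-# Placeholder URI: a local Milvus Lite file; use your server URI in production
-vector_store = Milvus(
-    embedding_function=OpenAIEmbeddings(),
-    connection_args={"uri": "./milvus_example.db"},
-)
-vector_store.add_texts(["Milvus stores and indexes embedding vectors."])
-```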
-
-## Retrievers
-
-See a [usage example](/docs/integrations/retrievers/milvus_hybrid_search).
-
-To import the retriever and sparse embedding helper:
-```python
-from langchain_milvus.retrievers import MilvusCollectionHybridSearchRetriever
-from langchain_milvus.utils.sparse import BM25SparseEmbedding
-```
-
diff --git a/langchain_md_files/integrations/providers/mindsdb.mdx b/langchain_md_files/integrations/providers/mindsdb.mdx
deleted file mode 100644
index 678d16f8127550a677bdfbc2f41aa9a5f9ee4e3d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mindsdb.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
-# MindsDB
-
-MindsDB is the platform for customizing AI from enterprise data. With MindsDB and its nearly 200 integrations with [data sources](https://docs.mindsdb.com/integrations/data-overview) and [AI/ML frameworks](https://docs.mindsdb.com/integrations/ai-overview), any developer can use their enterprise data to customize AI for their purpose, faster and more securely.
-
-With MindsDB, you can connect any data source to any AI/ML model to implement and automate AI-powered applications. Deploy, serve, and fine-tune models in real-time, utilizing data from databases, vector stores, or applications. Do all that using universal tools developers already know.
-
-MindsDB integrates with LangChain, enabling users to:
-
-
-- Deploy models available via LangChain within MindsDB, making them accessible to numerous data sources.
-- Fine-tune models available via LangChain within MindsDB using real-time and dynamic data.
-- Automate AI workflows with LangChain and MindsDB.
-
-Follow [our docs](https://docs.mindsdb.com/integrations/ai-engines/langchain) to learn more about MindsDB’s integration with LangChain and see examples.
diff --git a/langchain_md_files/integrations/providers/minimax.mdx b/langchain_md_files/integrations/providers/minimax.mdx
deleted file mode 100644
index a472380920a1a44999385ddfa8f70d5d7e79d223..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/minimax.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Minimax
-
->[Minimax](https://api.minimax.chat) is a Chinese startup that provides natural language processing models
-> for companies and individuals.
-
-## Installation and Setup
-- Get a [Minimax api key](https://api.minimax.chat/user-center/basic-information/interface-key) and set it as an environment variable (`MINIMAX_API_KEY`).
-- Get a [Minimax group id](https://api.minimax.chat/user-center/basic-information) and set it as an environment variable (`MINIMAX_GROUP_ID`).
-
-
-## LLM
-
-There exists a Minimax LLM wrapper, which you can access as shown below.
-See a [usage example](/docs/integrations/llms/minimax).
-
-```python
-from langchain_community.llms import Minimax
-```
-
-## Chat Models
-
-See a [usage example](/docs/integrations/chat/minimax)
-
-```python
-from langchain_community.chat_models import MiniMaxChat
-```
-
-## Text Embedding Model
-
-There exists a Minimax Embedding model, which you can access with
-```python
-from langchain_community.embeddings import MiniMaxEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/mistralai.mdx b/langchain_md_files/integrations/providers/mistralai.mdx
deleted file mode 100644
index ba6790aabfceee3e21c46186ae70155aa75f9412..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mistralai.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# MistralAI
-
->[Mistral AI](https://docs.mistral.ai/api/) is a platform that offers hosting for their powerful open source models.
-
-
-## Installation and Setup
-
-A valid [API key](https://console.mistral.ai/users/api-keys/) is needed to communicate with the API.
-
-You will also need the `langchain-mistralai` package:
-
-```bash
-pip install langchain-mistralai
-```
-
-## Chat models
-
-### ChatMistralAI
-
-See a [usage example](/docs/integrations/chat/mistralai).
-
-```python
-from langchain_mistralai.chat_models import ChatMistralAI
-```
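-
-A minimal sketch (assumes `MISTRAL_API_KEY` is set; the model name is a placeholder):
-
-```python
-from langchain_mistralai.chat_models import ChatMistralAI
-
-# Placeholder model name; pick any model available on the platform
-llm = ChatMistralAI(model="mistral-large-latest")
-llm.invoke("Write a haiku about open-source models.")
-```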
-
-## Embedding models
-
-### MistralAIEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/mistralai).
-
-```python
-from langchain_mistralai import MistralAIEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/mlflow.mdx b/langchain_md_files/integrations/providers/mlflow.mdx
deleted file mode 100644
index 861154a0b8a3e79adb4dd3bca9873bde4897a3a0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mlflow.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
-# MLflow AI Gateway for LLMs
-
->[The MLflow AI Gateway for LLMs](https://www.mlflow.org/docs/latest/llms/deployments/index.html) is a powerful tool designed to streamline the usage and management of various large
-> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
-> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.
-
-## Installation and Setup
-
-Install `mlflow` with MLflow GenAI dependencies:
-
-```sh
-pip install 'mlflow[genai]'
-```
-
-Set the OpenAI API key as an environment variable:
-
-```sh
-export OPENAI_API_KEY=...
-```
-
-Create a configuration file:
-
-```yaml
-endpoints:
-  - name: completions
-    endpoint_type: llm/v1/completions
-    model:
-      provider: openai
-      name: text-davinci-003
-      config:
-        openai_api_key: $OPENAI_API_KEY
-
-  - name: embeddings
-    endpoint_type: llm/v1/embeddings
-    model:
-      provider: openai
-      name: text-embedding-ada-002
-      config:
-        openai_api_key: $OPENAI_API_KEY
-```
-
-Start the gateway server:
-
-```sh
-mlflow gateway start --config-path /path/to/config.yaml
-```
-
-## Example provided by `MLflow`
-
->The `mlflow.langchain` module provides an API for logging and loading `LangChain` models.
-> This module exports multivariate LangChain models in the langchain flavor and univariate LangChain
-> models in the pyfunc flavor.
-
-See the [API documentation and examples](https://www.mlflow.org/docs/latest/llms/langchain/index.html) for more information.
-
-## Completions Example
-
-```python
-import mlflow
-from langchain.chains import LLMChain
-from langchain_core.prompts import PromptTemplate
-from langchain_community.llms import Mlflow
-
-llm = Mlflow(
-    target_uri="http://127.0.0.1:5000",
-    endpoint="completions",
-)
-
-llm_chain = LLMChain(
-    llm=llm,
-    prompt=PromptTemplate(
-        input_variables=["adjective"],
-        template="Tell me a {adjective} joke",
-    ),
-)
-result = llm_chain.run(adjective="funny")
-print(result)
-
-with mlflow.start_run():
-    model_info = mlflow.langchain.log_model(llm_chain, "model")
-
-model = mlflow.pyfunc.load_model(model_info.model_uri)
-print(model.predict([{"adjective": "funny"}]))
-```
-
-## Embeddings Example
-
-```python
-from langchain_community.embeddings import MlflowEmbeddings
-
-embeddings = MlflowEmbeddings(
-    target_uri="http://127.0.0.1:5000",
-    endpoint="embeddings",
-)
-
-print(embeddings.embed_query("hello"))
-print(embeddings.embed_documents(["hello"]))
-```
-
-## Chat Example
-
-```python
-from langchain_community.chat_models import ChatMlflow
-from langchain_core.messages import HumanMessage, SystemMessage
-
-chat = ChatMlflow(
-    target_uri="http://127.0.0.1:5000",
-    endpoint="chat",
-)
-
-messages = [
-    SystemMessage(
-        content="You are a helpful assistant that translates English to French."
-    ),
-    HumanMessage(
-        content="Translate this sentence from English to French: I love programming."
-    ),
-]
-print(chat(messages))
-```
diff --git a/langchain_md_files/integrations/providers/mlx.mdx b/langchain_md_files/integrations/providers/mlx.mdx
deleted file mode 100644
index dc859305cdee3df85d77337a12a32467b81a78fb..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mlx.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# MLX
-
->[MLX](https://ml-explore.github.io/mlx/build/html/index.html) is a `NumPy`-like array framework 
-> designed for efficient and flexible machine learning on `Apple` silicon, 
-> brought to you by `Apple machine learning research`.
-
-
-## Installation and Setup
-
-Install several Python packages:
-
-```bash
-pip install mlx-lm transformers huggingface_hub
-```
-
-
-## Chat models
-
-
-See a [usage example](/docs/integrations/chat/mlx).
-
-```python
-from langchain_community.chat_models.mlx import ChatMLX
-```
-
-## LLMs
-
-### MLX Local Pipelines
-
-See a [usage example](/docs/integrations/llms/mlx_pipelines).
-
-```python
-from langchain_community.llms.mlx_pipeline import MLXPipeline
-```
diff --git a/langchain_md_files/integrations/providers/modal.mdx b/langchain_md_files/integrations/providers/modal.mdx
deleted file mode 100644
index 7e02799d717a12d68df42e4deb2db5e6e34c579a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/modal.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
-# Modal
-
-This page covers how to use the Modal ecosystem to run LangChain custom LLMs.
-It is broken into two parts: 
-
-1. Modal installation and web endpoint deployment
-2. Using the deployed web endpoint with the `LLM` wrapper class.
-
-## Installation and Setup
-
-- Install with `pip install modal`
-- Run `modal token new`
-
-## Define your Modal Functions and Webhooks
-
-You must include a prompt. There is a rigid response structure:
-
-```python
-class Item(BaseModel):
-    prompt: str
-
-@stub.function()
-@modal.web_endpoint(method="POST")
-def get_text(item: Item):
-    return {"prompt": run_gpt2.call(item.prompt)}
-```
-
-The following is an example with the GPT2 model:
-
-```python
-from pydantic import BaseModel
-
-import modal
-
-CACHE_PATH = "/root/model_cache"
-
-class Item(BaseModel):
-    prompt: str
-
-stub = modal.Stub(name="example-get-started-with-langchain")
-
-def download_model():
-    from transformers import GPT2Tokenizer, GPT2LMHeadModel
-    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
-    model = GPT2LMHeadModel.from_pretrained('gpt2')
-    tokenizer.save_pretrained(CACHE_PATH)
-    model.save_pretrained(CACHE_PATH)
-
-# Define a container image for the LLM function below, which
-# downloads and stores the GPT-2 model.
-image = modal.Image.debian_slim().pip_install(
-    "tokenizers", "transformers", "torch", "accelerate"
-).run_function(download_model)
-
-@stub.function(
-    gpu="any",
-    image=image,
-    retries=3,
-)
-def run_gpt2(text: str):
-    from transformers import GPT2Tokenizer, GPT2LMHeadModel
-    tokenizer = GPT2Tokenizer.from_pretrained(CACHE_PATH)
-    model = GPT2LMHeadModel.from_pretrained(CACHE_PATH)
-    encoded_input = tokenizer(text, return_tensors='pt').input_ids
-    output = model.generate(encoded_input, max_length=50, do_sample=True)
-    return tokenizer.decode(output[0], skip_special_tokens=True)
-
-@stub.function()
-@modal.web_endpoint(method="POST")
-def get_text(item: Item):
-    return {"prompt": run_gpt2.call(item.prompt)}
-```
-
-### Deploy the web endpoint
-
-Deploy the web endpoint to Modal cloud with the [`modal deploy`](https://modal.com/docs/reference/cli/deploy) CLI command.
-Your web endpoint will acquire a persistent URL under the `modal.run` domain.
-
-## LLM wrapper around Modal web endpoint
-
-The `Modal` LLM wrapper class accepts your deployed web endpoint's URL.
-
-```python
-from langchain.chains import LLMChain
-from langchain_community.llms import Modal
-from langchain_core.prompts import PromptTemplate
-
-template = """Question: {question}
-
-Answer: Let's think step by step."""
-prompt = PromptTemplate.from_template(template)
-
-endpoint_url = "https://ecorp--custom-llm-endpoint.modal.run"  # REPLACE ME with your deployed Modal web endpoint's URL
-
-llm = Modal(endpoint_url=endpoint_url)
-llm_chain = LLMChain(prompt=prompt, llm=llm)
-
-question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
-
-llm_chain.run(question)
-```
-
diff --git a/langchain_md_files/integrations/providers/modelscope.mdx b/langchain_md_files/integrations/providers/modelscope.mdx
deleted file mode 100644
index 30c50e33bd58aa94699c19ab23390e62af5c5a61..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/modelscope.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
-# ModelScope
-
->[ModelScope](https://www.modelscope.cn/home) is a large repository of models and datasets.
-
-This page covers how to use the modelscope ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific modelscope wrappers.
-
-## Installation
-
-```bash
-pip install -U langchain-modelscope-integration
-```
-
-Head to [ModelScope](https://modelscope.cn/) to sign up to ModelScope and generate an [SDK token](https://modelscope.cn/my/myaccesstoken). Once you've done this set the `MODELSCOPE_SDK_TOKEN` environment variable:
-
-```bash
-export MODELSCOPE_SDK_TOKEN=<your_sdk_token>
-```
-
-## Chat Models
-
-`ModelScopeChatEndpoint` class exposes chat models from ModelScope. See available models [here](https://www.modelscope.cn/docs/model-service/API-Inference/intro).
-
-```python
-from langchain_modelscope import ModelScopeChatEndpoint
-
-llm = ModelScopeChatEndpoint(model="Qwen/Qwen2.5-Coder-32B-Instruct")
-llm.invoke("Sing a ballad of LangChain.")
-```
-
-## Embeddings
-
-`ModelScopeEmbeddings` class exposes embeddings from ModelScope.
-
-```python
-from langchain_modelscope import ModelScopeEmbeddings
-
-embeddings = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
-embeddings.embed_query("What is the meaning of life?")
-```
-
-## LLMs
-`ModelScopeLLM` class exposes LLMs from ModelScope.
-
-```python
-from langchain_modelscope import ModelScopeLLM
-
-llm = ModelScopeLLM(model="Qwen/Qwen2.5-Coder-32B-Instruct")
-llm.invoke("The meaning of life is")
-```
diff --git a/langchain_md_files/integrations/providers/modern_treasury.mdx b/langchain_md_files/integrations/providers/modern_treasury.mdx
deleted file mode 100644
index 908f17644effdbce09eb9c0e1cd0a68c01762ae0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/modern_treasury.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Modern Treasury
-
->[Modern Treasury](https://www.moderntreasury.com/) simplifies complex payment operations. It is a unified platform to power products and processes that move money.
->- Connect to banks and payment systems
->- Track transactions and balances in real-time
->- Automate payment operations for scale
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/modern_treasury).
-
-
-```python
-from langchain_community.document_loaders import ModernTreasuryLoader
-```
diff --git a/langchain_md_files/integrations/providers/momento.mdx b/langchain_md_files/integrations/providers/momento.mdx
deleted file mode 100644
index 6d39999878037c699a13ee3e9856f79baa8a5162..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/momento.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
-# Momento
-
-> [Momento Cache](https://docs.momentohq.com/) is the world's first truly serverless caching service, offering instant elasticity, scale-to-zero
-> capability, and blazing-fast performance.
->
-> [Momento Vector Index](https://docs.momentohq.com/vector-index) stands out as the most productive, easiest-to-use, fully serverless vector index.
->
-> For both services, simply grab the SDK, obtain an API key, input a few lines into your code, and you're set to go. Together, they provide a comprehensive solution for your LLM data needs.
-
-This page covers how to use the [Momento](https://gomomento.com) ecosystem within LangChain.
-
-## Installation and Setup
-
-- Sign up for a free account [here](https://console.gomomento.com/) to get an API key
-- Install the Momento Python SDK with `pip install momento`
-
-## Cache
-
-Use Momento as a serverless, distributed, low-latency cache for LLM prompts and responses. The standard cache is the primary use case for Momento users in any environment.
-
-To integrate Momento Cache into your application:
-
-```python
-from langchain.cache import MomentoCache
-```
-
-Then, set it up with the following code:
-
-```python
-from datetime import timedelta
-from momento import CacheClient, Configurations, CredentialProvider
-from langchain.globals import set_llm_cache
-
-# Instantiate the Momento client
-cache_client = CacheClient(
-    Configurations.Laptop.v1(),
-    CredentialProvider.from_environment_variable("MOMENTO_API_KEY"),
-    default_ttl=timedelta(days=1))
-
-# Choose a Momento cache name of your choice
-cache_name = "langchain"
-
-# Instantiate the LLM cache
-set_llm_cache(MomentoCache(cache_client, cache_name))
-```
-
-## Memory
-
-Momento can be used as a distributed memory store for LLMs.
-
-See [this notebook](/docs/integrations/memory/momento_chat_message_history) for a walkthrough of how to use Momento as a memory store for chat message history.
-
-```python
-from langchain.memory import MomentoChatMessageHistory
-```
-
-## Vector Store
-
-Momento Vector Index (MVI) can be used as a vector store.
-
-See [this notebook](/docs/integrations/vectorstores/momento_vector_index) for a walkthrough of how to use MVI as a vector store.
-
-```python
-from langchain_community.vectorstores import MomentoVectorIndex
-```
diff --git a/langchain_md_files/integrations/providers/mongodb.mdx b/langchain_md_files/integrations/providers/mongodb.mdx
deleted file mode 100644
index f981cdc13027114ae97efd1df80cd99ce108876a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mongodb.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# MongoDB
-
->[MongoDB](https://www.mongodb.com/) is a NoSQL, document-oriented 
-> database that supports JSON-like documents with a dynamic schema.
- 
-**NOTE:** 
-- See other `MongoDB` integrations on the [MongoDB Atlas page](/docs/integrations/providers/mongodb_atlas).
-
-## Installation and Setup
-
-Install the Python package:
-
-```bash
-pip install langchain-mongodb
-```
-
-## Message Histories
-
-See a [usage example](/docs/integrations/memory/mongodb_chat_message_history).
-
-To import the chat message history:
-```python
-from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
-```
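-
-A rough usage sketch (the connection string, session id, and database/collection names are placeholders):
-
-```python
-from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
-
-# Placeholder connection details
-history = MongoDBChatMessageHistory(
-    connection_string="mongodb://localhost:27017",
-    session_id="example-session",
-    database_name="chat_db",
-    collection_name="chat_histories",
-)
-history.add_user_message("Hello!")
-history.add_ai_message("Hi! How can I help you today?")
-```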
diff --git a/langchain_md_files/integrations/providers/mongodb_atlas.mdx b/langchain_md_files/integrations/providers/mongodb_atlas.mdx
deleted file mode 100644
index 601b84f015cbdafc8d8dcbe4b6d343a9fe447400..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/mongodb_atlas.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
-# MongoDB Atlas
-
->[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud
-> database available in AWS, Azure, and GCP.  It now has support for native 
-> Vector Search on the MongoDB document data.
-
-## Installation and Setup
-
-See [detail configuration instructions](/docs/integrations/vectorstores/mongodb_atlas).
-
-We need to install `langchain-mongodb` python package.
-
-```bash
-pip install langchain-mongodb
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/mongodb_atlas).
-
-```python
-from langchain_mongodb import MongoDBAtlasVectorSearch
-```
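-
-A rough sketch of creating the vector store from a connection string (the connection string, namespace in `<db>.<collection>` form, index name, and embeddings provider are placeholders):
-
-```python
-from langchain_mongodb import MongoDBAtlasVectorSearch
-from langchain_openai import OpenAIEmbeddings
-
-# Placeholder connection string, namespace, and Atlas Vector Search index name
-vector_store = MongoDBAtlasVectorSearch.from_connection_string(
-    "<YOUR_CONNECTION_STRING>",
-    "my_database.my_collection",
-    OpenAIEmbeddings(),
-    index_name="vector_index",
-)
-```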
-
-## Retrievers
-
-### Full Text Search Retriever
-
->`Full-Text Search Retriever` performs full-text searches using 
-> Lucene’s standard (`BM25`) analyzer.
-
-```python
-from langchain_mongodb.retrievers import MongoDBAtlasFullTextSearchRetriever
-```
-
-### Hybrid Search Retriever
-
->`Hybrid Search Retriever` combines vector and full-text searches weighting 
-> them the via `Reciprocal Rank Fusion` (`RRF`) algorithm.
- 
-```python
-from langchain_mongodb.retrievers import MongoDBAtlasHybridSearchRetriever
-```
-
-## Model Caches
-
-### MongoDBCache
-
-An abstraction to store a simple cache in MongoDB. This does not use Semantic Caching, nor does it require an index to be made on the collection before generation.
-
-To import this cache:
-```python
-from langchain_mongodb.cache import MongoDBCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain_core.globals import set_llm_cache
-
-# use any embedding provider...
-from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
-
-mongodb_atlas_uri = "<YOUR_CONNECTION_STRING>"
-COLLECTION_NAME="<YOUR_CACHE_COLLECTION_NAME>"
-DATABASE_NAME="<YOUR_DATABASE_NAME>"
-
-set_llm_cache(MongoDBCache(
-    connection_string=mongodb_atlas_uri,
-    collection_name=COLLECTION_NAME,
-    database_name=DATABASE_NAME,
-))
-```
-
-
-### MongoDBAtlasSemanticCache
-Semantic caching allows users to retrieve cached prompts based on semantic similarity between the user input and previously cached results. Under the hood it blends MongoDBAtlas as both a cache and a vectorstore.
-The MongoDBAtlasSemanticCache inherits from `MongoDBAtlasVectorSearch` and needs an Atlas Vector Search Index defined to work. Please look at the [usage example](/docs/integrations/vectorstores/mongodb_atlas) on how to set up the index.
-
-To import this cache:
-```python
-from langchain_mongodb.cache import MongoDBAtlasSemanticCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain_core.globals import set_llm_cache
-
-# use any embedding provider...
-from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
-
-mongodb_atlas_uri = "<YOUR_CONNECTION_STRING>"
-COLLECTION_NAME="<YOUR_CACHE_COLLECTION_NAME>"
-DATABASE_NAME="<YOUR_DATABASE_NAME>"
-
-set_llm_cache(MongoDBAtlasSemanticCache(
-    embedding=FakeEmbeddings(),
-    connection_string=mongodb_atlas_uri,
-    collection_name=COLLECTION_NAME,
-    database_name=DATABASE_NAME,
-))
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/motherduck.mdx b/langchain_md_files/integrations/providers/motherduck.mdx
deleted file mode 100644
index 790f8167aaa759fb73503dde7a9b728763d7273a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/motherduck.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
-# Motherduck
-
->[Motherduck](https://motherduck.com/) is a managed DuckDB-in-the-cloud service.
-
-## Installation and Setup
-
-First, you need to install `duckdb` python package.
-
-```bash
-pip install duckdb
-```
-
-You will also need to sign up for an account at [Motherduck](https://motherduck.com/)
-
-After that, you should set up a connection string - we mostly integrate with Motherduck through SQLAlchemy.
-The connection string is likely in the form:
-
-```
-token="..."
-
-conn_str = f"duckdb:///md:{token}@my_db"
-```
-
-## SQLChain
-
-You can use the SQLChain to query data in your Motherduck instance in natural language.
-
-```python
-from langchain_openai import OpenAI
-from langchain_community.utilities import SQLDatabase
-from langchain_experimental.sql import SQLDatabaseChain
-db = SQLDatabase.from_uri(conn_str)
-db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
-```
-
-From here, see the [SQL Chain](/docs/how_to#qa-over-sql--csv) documentation on how to use.
-
-
-## LLMCache
-
-You can also easily use Motherduck to cache LLM requests.
-Once again this is done through the SQLAlchemy wrapper.
-
-```python
-import sqlalchemy
-
-from langchain.globals import set_llm_cache
-from langchain_community.cache import SQLAlchemyCache
-
-eng = sqlalchemy.create_engine(conn_str)
-set_llm_cache(SQLAlchemyCache(engine=eng))
-```
-
-From here, see the [LLM Caching](/docs/integrations/llm_caching) documentation on how to use.
-
-
diff --git a/langchain_md_files/integrations/providers/motorhead.mdx b/langchain_md_files/integrations/providers/motorhead.mdx
deleted file mode 100644
index 0d88c47f0d458e1e5574beba0689b646749bf146..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/motorhead.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# Motörhead
-
->[Motörhead](https://github.com/getmetal/motorhead) is a memory server implemented in Rust. It automatically handles incremental summarization in the background and allows for stateless applications.
-
-## Installation and Setup
-
-See instructions at [Motörhead](https://github.com/getmetal/motorhead) for running the server locally.
-
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/motorhead_memory).
-
-```python
-from langchain_community.memory import MotorheadMemory
-```
diff --git a/langchain_md_files/integrations/providers/myscale.mdx b/langchain_md_files/integrations/providers/myscale.mdx
deleted file mode 100644
index 8192983ef93d340820cd6c50e66c060e9fde968f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/myscale.mdx
+++ /dev/null
@@ -1,66 +0,0 @@
-# MyScale
-
-This page covers how to use MyScale vector database within LangChain.
-It is broken into two parts: installation and setup, and then references to specific MyScale wrappers.
-
-With MyScale, you can manage both structured and unstructured (vectorized) data, and perform joint queries and analytics on both types of data using SQL. Plus, MyScale's cloud-native OLAP architecture, built on top of ClickHouse, enables lightning-fast data processing even on massive datasets.
-
-## Introduction
-
-[Overview to MyScale and High performance vector search](https://docs.myscale.com/en/overview/)
-
-You can now register on our SaaS and [start a cluster now!](https://docs.myscale.com/en/quickstart/)
-
-If you are also interested in how we managed to integrate SQL and vector, please refer to [this document](https://docs.myscale.com/en/vector-reference/) for further syntax reference.
-
-We also provide a live demo on Hugging Face! Please check out our [huggingface space](https://huggingface.co/myscale)! It searches millions of vectors within a blink!
-
-## Installation and Setup
-- Install the Python SDK with `pip install clickhouse-connect`
-
-### Setting up environments
-
-There are two ways to set up parameters for the MyScale index.
-
-1. Environment Variables
-
-    Before you run the app, please set the environment variable with `export`:
-    `export MYSCALE_HOST='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...`
-
-    You can easily find your account, password and other info on our SaaS. For details please refer to [this document](https://docs.myscale.com/en/cluster-management/).
-    Every attribute under `MyScaleSettings` can be set with the `MYSCALE_` prefix and is case insensitive.
-
-2. Create `MyScaleSettings` object with parameters
-
-
-    ```python
-    from langchain_community.vectorstores import MyScale, MyScaleSettings
-    config = MyScaleSettings(host="<your-backend-url>", port=8443, ...)
-    index = MyScale(embedding_function, config)
-    index.add_documents(...)
-    ```
-  
-## Wrappers
-
-Supported functions:
-- `add_texts`
-- `add_documents`
-- `from_texts`
-- `from_documents`
-- `similarity_search`
-- `asimilarity_search`
-- `similarity_search_by_vector`
-- `asimilarity_search_by_vector`
-- `similarity_search_with_relevance_scores`
-- `delete`
-
-### VectorStore
-
-There exists a wrapper around MyScale database, allowing you to use it as a vectorstore,
-whether for semantic search or similar example retrieval.
-
-To import this vectorstore:
-```python
-from langchain_community.vectorstores import MyScale
-```
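-
-As a minimal, hypothetical sketch (assuming the `MYSCALE_*` environment variables are set, and using `OpenAIEmbeddings` as a stand-in for any embedding provider):
-
-```python
-from langchain_community.vectorstores import MyScale
-from langchain_openai import OpenAIEmbeddings  # any embedding provider works here
-
-index = MyScale(embedding=OpenAIEmbeddings())
-index.add_texts(["MyScale stores vectors alongside SQL data."])
-docs = index.similarity_search("Where are the vectors stored?", k=1)
-print(docs[0].page_content)
-```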
-
-For a more detailed walkthrough of the MyScale wrapper, see [this notebook](/docs/integrations/vectorstores/myscale)
diff --git a/langchain_md_files/integrations/providers/naver.mdx b/langchain_md_files/integrations/providers/naver.mdx
deleted file mode 100644
index b9f6a1ca7fed3e6006d230db9db2679bd88fec49..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/naver.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-# NAVER
-
-All functionality related to `Naver`, including HyperCLOVA X models, particularly those accessible through `Naver Cloud` [CLOVA Studio](https://clovastudio.ncloud.com/).
-
-> [Naver](https://navercorp.com/) is a global technology company with cutting-edge technologies and a diverse business portfolio including search, commerce, fintech, content, cloud, and AI.
-
-> [Naver Cloud](https://www.navercloudcorp.com/lang/en/) is the cloud computing arm of Naver, a leading cloud service provider offering a comprehensive suite of cloud services to businesses through its [Naver Cloud Platform (NCP)](https://www.ncloud.com/).
-
-Please refer to [NCP User Guide](https://guide.ncloud-docs.com/docs/clovastudio-overview) for more detailed instructions (also in Korean).
-
-## Installation and Setup
-
-- Get a CLOVA Studio API Key by [issuing it](https://api.ncloud-docs.com/docs/ai-naver-clovastudio-summary#API%ED%82%A4) and set it as an environment variable (`NCP_CLOVASTUDIO_API_KEY`).
-    - If you are using a legacy API Key (that doesn't start with `nv-*` prefix), you might need to get an additional API Key by [creating your app](https://guide.ncloud-docs.com/docs/en/clovastudio-playground01#create-test-app) and set it as `NCP_APIGW_API_KEY`.
-- Install the integration Python package with:
-
-```bash
-pip install -U langchain-community
-```
-
-## Chat models
-
-### ChatClovaX 
-
-See a [usage example](/docs/integrations/chat/naver).
-
-```python
-from langchain_community.chat_models import ChatClovaX
-```
-
-## Embedding models
-
-### ClovaXEmbeddings
-
-See a [usage example](/docs/integrations/text_embedding/naver).
-
-```python
-from langchain_community.embeddings import ClovaXEmbeddings
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/neo4j.mdx b/langchain_md_files/integrations/providers/neo4j.mdx
deleted file mode 100644
index 2b8d8f683cc9f33e50f2545262a5c5623d4ca847..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/neo4j.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
-# Neo4j
-
->What is `Neo4j`?
-
->- Neo4j is an `open-source database management system` that specializes in graph database technology.
->- Neo4j allows you to represent and store data in nodes and edges, making it ideal for handling connected data and relationships.
->- Neo4j provides a `Cypher Query Language`, making it easy to interact with and query your graph data.
->- With Neo4j, you can achieve high-performance `graph traversals and queries`, suitable for production-level systems.
-
->Get started with Neo4j by visiting [their website](https://neo4j.com/).
-
-## Installation and Setup
-
-- Install the Python SDK with `pip install neo4j langchain-neo4j`
-
-
-## VectorStore
-
-The Neo4j vector index is used as a vectorstore,
-whether for semantic search or example selection.
-
-```python
-from langchain_neo4j import Neo4jVector
-```
-
-See a [usage example](/docs/integrations/vectorstores/neo4jvector)
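-
-A minimal sketch, assuming a local Neo4j instance (the connection details are illustrative) and using `OpenAIEmbeddings` as a stand-in for any embedding provider:
-
-```python
-from langchain_neo4j import Neo4jVector
-from langchain_openai import OpenAIEmbeddings  # any embedding provider works here
-
-vectorstore = Neo4jVector.from_texts(
-    ["Neo4j stores data as nodes and relationships."],
-    OpenAIEmbeddings(),
-    url="bolt://localhost:7687",
-    username="neo4j",
-    password="password",
-)
-docs = vectorstore.similarity_search("How does Neo4j store data?", k=1)
-```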
-
-## GraphCypherQAChain
-
-There exists a wrapper around the Neo4j graph database that allows you to generate Cypher statements based on the user input
-and use them to retrieve relevant information from the database.
-
-```python
-from langchain_neo4j import GraphCypherQAChain, Neo4jGraph
-```
-
-See a [usage example](/docs/integrations/graphs/neo4j_cypher)
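-
-A minimal sketch, assuming a local Neo4j instance (the connection details, chat model, and question are illustrative):
-
-```python
-from langchain_neo4j import GraphCypherQAChain, Neo4jGraph
-from langchain_openai import ChatOpenAI  # any chat model works here
-
-graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")
-chain = GraphCypherQAChain.from_llm(
-    ChatOpenAI(temperature=0),
-    graph=graph,
-    verbose=True,
-    allow_dangerous_requests=True,  # acknowledges that generated Cypher runs against your database
-)
-chain.invoke({"query": "Who acted in the most movies?"})
-```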
-
-## Constructing a knowledge graph from text
-
-Text data often contain rich relationships and insights that can be useful for various analytics, recommendation engines, or knowledge management applications.
-Diffbot's NLP API allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.
-By coupling Diffbot's NLP API with Neo4j, a graph database, you can create powerful, dynamic graph structures based on the information extracted from text.
-These graph structures are fully queryable and can be integrated into various applications.
-
-```python
-from langchain_neo4j import Neo4jGraph
-from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
-```
-
-See a [usage example](/docs/integrations/graphs/diffbot)
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/neo4j_chat_message_history).
-
-```python
-from langchain_neo4j import Neo4jChatMessageHistory
-```
diff --git a/langchain_md_files/integrations/providers/nlpcloud.mdx b/langchain_md_files/integrations/providers/nlpcloud.mdx
deleted file mode 100644
index f6d664833a18d8303a65e1baebe930557da4edb1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/nlpcloud.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# NLPCloud
-
->[NLP Cloud](https://docs.nlpcloud.com/#introduction) is an artificial intelligence platform that allows you to use the most advanced AI engines, and even train your own engines with your own data. 
-
-
-## Installation and Setup
-
-- Install the `nlpcloud` package.
-
-```bash
-pip install nlpcloud
-```
-
-- Get an NLPCloud api key and set it as an environment variable (`NLPCLOUD_API_KEY`)
-
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/nlpcloud).
-
-```python
-from langchain_community.llms import NLPCloud
-```
-
-## Text Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/nlp_cloud)
-
-```python
-from langchain_community.embeddings import NLPCloudEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/nomic.mdx b/langchain_md_files/integrations/providers/nomic.mdx
deleted file mode 100644
index f825e3c74e36041d427d12a8e3c81e5745ac41f2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/nomic.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
-# Nomic
-
->[Nomic](https://www.nomic.ai/) builds tools that enable everyone to interact with AI scale datasets and run AI models on consumer computers.
->
->`Nomic` currently offers two products:
->
->- `Atlas`: the Visual Data Engine
->- `GPT4All`: the Open Source Edge Language Model Ecosystem
-
-The Nomic integration exists in two partner packages: [langchain-nomic](https://pypi.org/project/langchain-nomic/)
-and in [langchain-community](https://pypi.org/project/langchain-community/). 
-
-## Installation
-
-You can install them with:
-
-```bash
-pip install -U langchain-nomic
-pip install -U langchain-community
-```
-
-## LLMs
-
-### GPT4All
-
-See [a usage example](/docs/integrations/llms/gpt4all).
-
-```python
-from langchain_community.llms import GPT4All
-```
-
-## Embedding models
-
-### NomicEmbeddings
-
-See [a usage example](/docs/integrations/text_embedding/nomic).
-
-```python
-from langchain_nomic import NomicEmbeddings
-```
-
-### GPT4All
-
-See [a usage example](/docs/integrations/text_embedding/gpt4all).
-
-```python
-from langchain_community.embeddings import GPT4AllEmbeddings
-```
-
-## Vector store
-
-### Atlas
-
-See [a usage example and installation instructions](/docs/integrations/vectorstores/atlas).
-
-```python
-from langchain_community.vectorstores import AtlasDB
-```
diff --git a/langchain_md_files/integrations/providers/notion.mdx b/langchain_md_files/integrations/providers/notion.mdx
deleted file mode 100644
index 6ed4fd306fc935cd530281b1899331a58025a5c3..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/notion.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Notion DB
-
->[Notion](https://www.notion.so/) is a collaboration platform with modified Markdown support that integrates kanban 
-> boards, tasks, wikis and databases. It is an all-in-one workspace for notetaking, knowledge and data management, 
-> and project and task management.
-
-## Installation and Setup
-
-All instructions are in examples below.
-
-## Document Loader
-
-We have two different loaders: `NotionDirectoryLoader` and `NotionDBLoader`.
-
-See [usage examples here](/docs/integrations/document_loaders/notion).
-
-
-```python
-from langchain_community.document_loaders import NotionDirectoryLoader, NotionDBLoader
-```
diff --git a/langchain_md_files/integrations/providers/nuclia.mdx b/langchain_md_files/integrations/providers/nuclia.mdx
deleted file mode 100644
index 91daeb6a5a242e7f79d64fc6bd8f1cbcf9a77109..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/nuclia.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
-# Nuclia
-
->[Nuclia](https://nuclia.com) automatically indexes your unstructured data from any internal
-> and external source, providing optimized search results and generative answers. 
-> It can handle video and audio transcription, image content extraction, and document parsing.
-
-
-
-## Installation and Setup
-
-We need to install the `nucliadb-protos` package to use the `Nuclia Understanding API`.
-
-```bash
-pip install nucliadb-protos
-```
-
-We need to have a `Nuclia account`. 
-We can create one for free at [https://nuclia.cloud](https://nuclia.cloud), 
-and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro).
-
-
-## Document Transformer
-
-### Nuclia
-
->`Nuclia Understanding API` document transformer splits text into paragraphs and sentences, 
-> identifies entities, provides a summary of the text and generates embeddings for all the sentences.
-
-To use the Nuclia document transformer, we need to instantiate a `NucliaUnderstandingAPI`
-tool with `enable_ml` set to `True`:
-
-```python
-from langchain_community.tools.nuclia import NucliaUnderstandingAPI
-
-nua = NucliaUnderstandingAPI(enable_ml=True)
-```
-
-See a [usage example](/docs/integrations/document_transformers/nuclia_transformer).
-
-```python
-from langchain_community.document_transformers.nuclia_text_transform import NucliaTextTransformer
-```
-
-## Document Loaders
-
-### Nuclia loader
-
-See a [usage example](/docs/integrations/document_loaders/nuclia).
-
-```python
-from langchain_community.document_loaders.nuclia import NucliaLoader
-```
-
-## Vector store
-
-### NucliaDB
-
-We need to install a python package:
-
-```bash
-pip install nuclia
-```
-
-See a [usage example](/docs/integrations/vectorstores/nucliadb).
-
-```python
-from langchain_community.vectorstores.nucliadb import NucliaDB
-```
-
-## Tools
-
-### Nuclia Understanding
-
-See a [usage example](/docs/integrations/tools/nuclia).
-
-```python
-from langchain_community.tools.nuclia import NucliaUnderstandingAPI
-```
diff --git a/langchain_md_files/integrations/providers/nvidia.mdx b/langchain_md_files/integrations/providers/nvidia.mdx
deleted file mode 100644
index 2dc6bf2f43837a297dd35c9ac72ca1bec4ca310c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/nvidia.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
-# NVIDIA
-The `langchain-nvidia-ai-endpoints` package contains LangChain integrations for building applications with models on
-the NVIDIA NIM inference microservice. NIM supports models across domains like chat, embedding, and re-ranking models
-from the community as well as NVIDIA. These models are optimized by NVIDIA to deliver the best performance on NVIDIA
-accelerated infrastructure and deployed as a NIM, an easy-to-use, prebuilt container that deploys anywhere with a single
-command on NVIDIA accelerated infrastructure.
-
-NVIDIA hosted deployments of NIMs are available to test on the [NVIDIA API catalog](https://build.nvidia.com/). After testing, 
-NIMs can be exported from NVIDIA’s API catalog using the NVIDIA AI Enterprise license and run on-premises or in the cloud, 
-giving enterprises ownership and full control of their IP and AI application.
-
-NIMs are packaged as container images on a per model basis and are distributed as NGC container images through the NVIDIA NGC Catalog. 
-At their core, NIMs provide easy, consistent, and familiar APIs for running inference on an AI model.
-
-Below is an example of how to use some common functionality surrounding text-generation and embedding models.
-
-## Installation
-
-```bash
-pip install -U --quiet langchain-nvidia-ai-endpoints
-```
-
-## Setup
-
-**To get started:**
-
-1. Create a free account with [NVIDIA](https://build.nvidia.com/), which hosts NVIDIA AI Foundation models.
-
-2. Click on your model of choice.
-
-3. Under Input select the Python tab, and click `Get API Key`. Then click `Generate Key`.
-
-4. Copy and save the generated key as NVIDIA_API_KEY. From there, you should have access to the endpoints.
-
-```python
-import getpass
-import os
-
-if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
-    nvidia_api_key = getpass.getpass("Enter your NVIDIA API key: ")
-    assert nvidia_api_key.startswith("nvapi-"), f"{nvidia_api_key[:5]}... is not a valid key"
-    os.environ["NVIDIA_API_KEY"] = nvidia_api_key
-```
-## Working with NVIDIA API Catalog
-
-```python
-from langchain_nvidia_ai_endpoints import ChatNVIDIA
-
-llm = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1")
-result = llm.invoke("Write a ballad about LangChain.")
-print(result.content)
-```
-
-Using the API, you can query live endpoints available on the NVIDIA API Catalog to get quick results from a DGX-hosted cloud compute environment. All models are source-accessible and can be deployed on your own compute cluster using NVIDIA NIM which is part of NVIDIA AI Enterprise, shown in the next section [Working with NVIDIA NIMs](#working-with-nvidia-nims).
-
-## Working with NVIDIA NIMs
-When ready to deploy, you can self-host models with NVIDIA NIM—which is included with the NVIDIA AI Enterprise software license—and run them anywhere, giving you ownership of your customizations and full control of your intellectual property (IP) and AI applications.
-
-[Learn more about NIMs](https://developer.nvidia.com/blog/nvidia-nim-offers-optimized-inference-microservices-for-deploying-ai-models-at-scale/)
-
-```python
-from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank
-
-# connect to a chat NIM running at localhost:8000, specifying a model
-llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
-
-# connect to an embedding NIM running at localhost:8080
-embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
-
-# connect to a reranking NIM running at localhost:2016
-ranker = NVIDIARerank(base_url="http://localhost:2016/v1")
-```
-
-## Using NVIDIA AI Foundation Endpoints
-
-A selection of NVIDIA AI Foundation models are supported directly in LangChain with familiar APIs.
-
-The active models which are supported can be found [in API Catalog](https://build.nvidia.com/).
-
-**The following may be useful examples to help you get started:**
-- **[`ChatNVIDIA` Model](/docs/integrations/chat/nvidia_ai_endpoints).**
-- **[`NVIDIAEmbeddings` Model for RAG Workflows](/docs/integrations/text_embedding/nvidia_ai_endpoints).**
diff --git a/langchain_md_files/integrations/providers/obsidian.mdx b/langchain_md_files/integrations/providers/obsidian.mdx
deleted file mode 100644
index ce1169df90acbda16d48f609d58c8ebe94577257..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/obsidian.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Obsidian
-
->[Obsidian](https://obsidian.md/) is a powerful and extensible knowledge base
->that works on top of your local folder of plain text files.
-
-## Installation and Setup
-
-All instructions are in examples below.
-
-## Document Loader
-
-
-See a [usage example](/docs/integrations/document_loaders/obsidian).
-
-
-```python
-from langchain_community.document_loaders import ObsidianLoader
-```
-
diff --git a/langchain_md_files/integrations/providers/oceanbase.mdx b/langchain_md_files/integrations/providers/oceanbase.mdx
deleted file mode 100644
index 30537f139100429261533a23e2e2314258fd501d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/oceanbase.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# OceanBase
-
-[OceanBase Database](https://github.com/oceanbase/oceanbase) is a distributed relational database. 
-It is developed entirely by Ant Group. The OceanBase Database is built on a common server cluster. 
-Based on the Paxos protocol and its distributed structure, the OceanBase Database provides high availability and linear scalability.
-
-OceanBase currently has the ability to store vectors. Users can easily perform the following operations with SQL:
-
-- Create a table containing vector type fields;
-- Create a vector index table based on the HNSW algorithm;
-- Perform vector approximate nearest neighbor queries;
-- ...
-
-## Installation
-
-```bash
-pip install -U langchain-oceanbase
-```
-
-We recommend using Docker to deploy OceanBase:
-
-```shell
-docker run --name=ob433 -e MODE=slim -p 2881:2881 -d oceanbase/oceanbase-ce:4.3.3.0-100000132024100711
-```
-
-[More methods to deploy OceanBase cluster](https://github.com/oceanbase/oceanbase-doc/blob/V4.3.1/en-US/400.deploy/500.deploy-oceanbase-database-community-edition/100.deployment-overview.md)
-
-### Usage
-
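-The partner package exposes the vector store integration; as a hedged sketch (the exact import path below is an assumption and should be confirmed against the linked notebook):
-
-```python
-# assumed import path; see the linked notebook for the authoritative usage
-from langchain_oceanbase.vectorstores import OceanbaseVectorStore
-```
-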
-For a more detailed walkthrough of the OceanBase Wrapper, see [this notebook](https://github.com/oceanbase/langchain-oceanbase/blob/main/docs/vectorstores.ipynb)
-
diff --git a/langchain_md_files/integrations/providers/oci.mdx b/langchain_md_files/integrations/providers/oci.mdx
deleted file mode 100644
index 58c167995bfbf54e49442ca3884c45f38477e2ad..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/oci.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
-# Oracle Cloud Infrastructure (OCI)
-
-The `LangChain` integrations related to [Oracle Cloud Infrastructure](https://www.oracle.com/artificial-intelligence/).
-
-## OCI Generative AI
-> Oracle Cloud Infrastructure (OCI) [Generative AI](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm) is a fully managed service that provides a set of state-of-the-art,
-> customizable large language models (LLMs) that cover a wide range of use cases, and which are available through a single API.
-> Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned
-> custom models based on your own data on dedicated AI clusters. 
-
-To use, you should have the latest `oci` python SDK and the langchain_community package installed.
-
-```bash
-pip install -U oci langchain-community
-```
-
-See [chat](/docs/integrations/chat/oci_generative_ai), [complete](/docs/integrations/llms/oci_generative_ai), and [embedding](/docs/integrations/text_embedding/oci_generative_ai) usage examples.
-
-```python
-from langchain_community.chat_models import ChatOCIGenAI
-
-from langchain_community.llms import OCIGenAI
-
-from langchain_community.embeddings import OCIGenAIEmbeddings
-```
-
-## OCI Data Science Model Deployment Endpoint
-
-> [OCI Data Science](https://docs.oracle.com/en-us/iaas/data-science/using/home.htm) is a
-> fully managed and serverless platform for data science teams. Using the OCI Data Science
-> platform you can build, train, and manage machine learning models, and then deploy them
-> as an OCI Model Deployment Endpoint using the
-> [OCI Data Science Model Deployment Service](https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-about.htm).
-
-To use, you should have the latest `oracle-ads` python SDK installed.
-
-```bash
-pip install -U oracle-ads
-```
-
-See [chat](/docs/integrations/chat/oci_data_science) and [complete](/docs/integrations/llms/oci_model_deployment_endpoint) usage examples.
-
-
-```python
-from langchain_community.chat_models import ChatOCIModelDeployment
-
-from langchain_community.llms import OCIModelDeploymentLLM
-```
-
diff --git a/langchain_md_files/integrations/providers/octoai.mdx b/langchain_md_files/integrations/providers/octoai.mdx
deleted file mode 100644
index d4a064c7c7672a7f808bc3d1f95eb9a53edb7b4e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/octoai.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# OctoAI
-
->[OctoAI](https://docs.octoai.cloud/docs) offers easy access to efficient compute 
-> and enables users to integrate their choice of AI models into applications. 
-> The `OctoAI` compute service helps you run, tune, and scale AI applications easily.
-
-
-## Installation and Setup
-
-- Install the `openai` Python package:
-  ```bash
-  pip install openai
-  ```
-- Register on `OctoAI` and get an API Token from [your OctoAI account page](https://octoai.cloud/settings).
-
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/octoai).
-
-```python
-from langchain_community.chat_models import ChatOctoAI
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/octoai).
-
-```python
-from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
-```
-
-## Embedding models
-
-```python
-from langchain_community.embeddings.octoai_embeddings import OctoAIEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/ollama.mdx b/langchain_md_files/integrations/providers/ollama.mdx
deleted file mode 100644
index 6a05b5e2be606b37a11ccc865634629bc9b488f6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ollama.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
-# Ollama
-
->[Ollama](https://ollama.com/) allows you to run open-source large language models, 
-> such as [Llama3.1](https://ai.meta.com/blog/meta-llama-3-1/), locally.
->
->`Ollama` bundles model weights, configuration, and data into a single package, defined by a Modelfile. 
->It optimizes setup and configuration details, including GPU usage.
->For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).
-
-See [this guide](/docs/how_to/local_llms) for more details
-on how to use `Ollama` with LangChain.
-
-## Installation and Setup
-### Ollama installation
-Follow [these instructions](https://github.com/ollama/ollama?tab=readme-ov-file#ollama) 
-to set up and run a local Ollama instance.
-
-Ollama will start as a background service automatically; if this is disabled, run:
-
-```bash
-# export OLLAMA_HOST=127.0.0.1 # environment variable to set ollama host
-# export OLLAMA_PORT=11434 # environment variable to set the ollama port
-ollama serve
-```
-
-After starting Ollama, run `ollama pull <model_checkpoint>` to download a model 
-from the [Ollama model library](https://ollama.ai/library).
-
-```bash
-ollama pull llama3.1
-```
-
-We're now ready to install the `langchain-ollama` partner package and run a model.
-
-### Ollama LangChain partner package install
-Install the integration package with:
-```bash
-pip install langchain-ollama
-```
-## LLM
-
-```python
-from langchain_ollama.llms import OllamaLLM
-```
-
-See the notebook example [here](/docs/integrations/llms/ollama).
-
-## Chat Models
-
-### Chat Ollama
-
-```python
-from langchain_ollama.chat_models import ChatOllama
-```
-
-See the notebook example [here](/docs/integrations/chat/ollama).
-
-### Ollama tool calling
-[Ollama tool calling](https://ollama.com/blog/tool-support) uses the
-OpenAI-compatible web server specification and can be used with
-the default `BaseChatModel.bind_tools()` method,
-as described [here](/docs/how_to/tool_calling/).
-Make sure to select an Ollama model that supports [tool calling](https://ollama.com/search?&c=tools).
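-
-As a minimal sketch (assuming `llama3.1` has been pulled locally and supports tool calling):
-
-```python
-from langchain_core.tools import tool
-from langchain_ollama.chat_models import ChatOllama
-
-@tool
-def add(a: int, b: int) -> int:
-    """Add two integers."""
-    return a + b
-
-llm = ChatOllama(model="llama3.1")  # assumes the model was pulled with `ollama pull llama3.1`
-llm_with_tools = llm.bind_tools([add])
-msg = llm_with_tools.invoke("What is 2 + 3?")
-print(msg.tool_calls)  # the model's requested tool invocations
-```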
-
-## Embedding models
-
-```python
-from langchain_community.embeddings import OllamaEmbeddings
-```
-
-See the notebook example [here](/docs/integrations/text_embedding/ollama).
-
-
diff --git a/langchain_md_files/integrations/providers/ontotext_graphdb.mdx b/langchain_md_files/integrations/providers/ontotext_graphdb.mdx
deleted file mode 100644
index 468699cd215856625ac7995941054d3ed4d56849..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/ontotext_graphdb.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Ontotext GraphDB
-
->[Ontotext GraphDB](https://graphdb.ontotext.com/) is a graph database and knowledge discovery tool compliant with RDF and SPARQL.
-
-## Dependencies
-
-Install the [rdflib](https://github.com/RDFLib/rdflib) package with
-```bash
-pip install rdflib==7.0.0
-```
-
-## Graph QA Chain
-
-Connect your GraphDB Database with a chat model to get insights on your data.
-
-See the notebook example [here](/docs/integrations/graphs/ontotext).
-
-```python
-from langchain_community.graphs import OntotextGraphDBGraph
-from langchain.chains import OntotextGraphDBQAChain
-```
diff --git a/langchain_md_files/integrations/providers/openai.mdx b/langchain_md_files/integrations/providers/openai.mdx
deleted file mode 100644
index 57830479e93d2d4f60980631d60632ae03fed54d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/openai.mdx
+++ /dev/null
@@ -1,123 +0,0 @@
----
-keywords: [openai]
----
-
-# OpenAI
-
-All functionality related to OpenAI
-
->[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an American artificial intelligence (AI) research laboratory 
-> consisting of the non-profit `OpenAI Incorporated`
-> and its for-profit subsidiary corporation `OpenAI Limited Partnership`. 
-> `OpenAI` conducts AI research with the declared intention of promoting and developing a friendly AI. 
-> `OpenAI` systems run on an `Azure`-based supercomputing platform from `Microsoft`.
-
->The [OpenAI API](https://platform.openai.com/docs/models) is powered by a diverse set of models with different capabilities and price points.
-> 
->[ChatGPT](https://chat.openai.com) is the Artificial Intelligence (AI) chatbot developed by `OpenAI`.
-
-## Installation and Setup
-
-Install the integration package with
-```bash
-pip install langchain-openai
-```
-
-Get an OpenAI api key and set it as an environment variable (`OPENAI_API_KEY`)
-
-## Chat model
-
-See a [usage example](/docs/integrations/chat/openai).
-
-```python
-from langchain_openai import ChatOpenAI
-```
-
-If you are using a model hosted on `Azure`, you should use a different wrapper for that:
-```python
-from langchain_openai import AzureChatOpenAI
-```
-For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/chat/azure_chat_openai).
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/openai).
-
-```python
-from langchain_openai import OpenAI
-```
-
-If you are using a model hosted on `Azure`, you should use a different wrapper for that:
-```python
-from langchain_openai import AzureOpenAI
-```
-For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/llms/azure_openai).
-
-## Embedding Model
-
-See a [usage example](/docs/integrations/text_embedding/openai)
-
-```python
-from langchain_openai import OpenAIEmbeddings
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/chatgpt_loader).
-
-```python
-from langchain_community.document_loaders.chatgpt import ChatGPTLoader
-```
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/chatgpt-plugin).
-
-```python
-from langchain.retrievers import ChatGPTPluginRetriever
-```
-
-## Tools
-
-### Dall-E Image Generator
-
->[OpenAI Dall-E](https://openai.com/dall-e-3) are text-to-image models developed by `OpenAI` 
-> using deep learning methodologies to generate digital images from natural language descriptions, 
-> called "prompts".
-
-
-See a [usage example](/docs/integrations/tools/dalle_image_generator).
-
-```python
-from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
-```
-
-## Adapter
-
-See a [usage example](/docs/integrations/adapters/openai).
-
-```python
-from langchain.adapters import openai as lc_openai
-```
-
-## Tokenizer
-
-There are several places you can use the `tiktoken` tokenizer. By default, it is used to count tokens
-for OpenAI LLMs.
-
-You can also use it to count tokens when splitting documents with 
-```python
-from langchain.text_splitter import CharacterTextSplitter
-CharacterTextSplitter.from_tiktoken_encoder(...)
-```
-For a more detailed walkthrough of this, see [this notebook](/docs/how_to/split_by_token/#tiktoken)
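-
-For instance, a minimal sketch (the chunk sizes are illustrative; requires `tiktoken` to be installed):
-
-```python
-from langchain.text_splitter import CharacterTextSplitter
-
-splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=100, chunk_overlap=0)
-chunks = splitter.split_text("LangChain can count tokens with tiktoken while splitting text.")
-```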
-
-## Chain
-
-See a [usage example](https://python.langchain.com/v0.1/docs/guides/productionization/safety/moderation).
-
-```python
-from langchain.chains import OpenAIModerationChain
-```
-
-
diff --git a/langchain_md_files/integrations/providers/openllm.mdx b/langchain_md_files/integrations/providers/openllm.mdx
deleted file mode 100644
index dd09257954774eff31c0e57dac32ea1c5d4e6e62..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/openllm.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
----
-keywords: [openllm]
----
-
-# OpenLLM
-
-OpenLLM lets developers run any **open-source LLMs** as **OpenAI-compatible API** endpoints with **a single command**.
-
-- 🔬 Built for fast and production usage
-- 🚂 Support llama3, qwen2, gemma, etc, and many **quantized** versions [full list](https://github.com/bentoml/openllm-models)
-- ⛓️ OpenAI-compatible API
-- 💬 Built-in ChatGPT like UI
-- 🔥 Accelerated LLM decoding with state-of-the-art inference backends
-- 🌥️ Ready for enterprise-grade cloud deployment (Kubernetes, Docker and BentoCloud)
-
-## Installation and Setup
-
-Install the OpenLLM package via PyPI:
-
-```bash
-pip install openllm
-```
-
-## LLM
-
-OpenLLM supports a wide range of open-source LLMs as well as serving users' own
-fine-tuned LLMs. Use the `openllm model` command to see all available models that
-are pre-optimized for OpenLLM.
-
-## Wrappers
-
-There is an OpenLLM wrapper that supports interacting with a running OpenLLM server:
-
-```python
-from langchain_community.llms import OpenLLM
-```
-
-### Wrapper for OpenLLM server
-
-This wrapper supports interacting with OpenLLM's OpenAI-compatible endpoint.
-
-To run a model, do:
-
-```bash
-openllm hello
-```
-
-Wrapper usage:
-
-```python
-from langchain_community.llms import OpenLLM
-
-llm = OpenLLM(base_url="http://localhost:3000/v1", api_key="na")
-
-llm("What is the difference between a duck and a goose? And why there are so many Goose in Canada?")
-```
-
-### Usage
-
-For a more detailed walkthrough of the OpenLLM Wrapper, see the
-[example notebook](/docs/integrations/llms/openllm)
diff --git a/langchain_md_files/integrations/providers/opensearch.mdx b/langchain_md_files/integrations/providers/opensearch.mdx
deleted file mode 100644
index be55c26d7b225ac8c59b610d7a9b9213c6c2ae61..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/opensearch.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# OpenSearch
-
-This page covers how to use the OpenSearch ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific OpenSearch wrappers.
-
-## Installation and Setup
-- Install the Python package with `pip install opensearch-py`
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around OpenSearch vector databases, allowing you to use it as a vectorstore
-for semantic search using approximate vector search powered by the Lucene, NMSLIB, and Faiss engines,
-or using Painless scripting and script scoring functions for brute-force vector search.
-
-To import this vectorstore:
-```python
-from langchain_community.vectorstores import OpenSearchVectorSearch
-```
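-
-A minimal sketch, assuming a local OpenSearch instance at the default port and using `OpenAIEmbeddings` as a stand-in for any embedding provider:
-
-```python
-from langchain_community.vectorstores import OpenSearchVectorSearch
-from langchain_openai import OpenAIEmbeddings  # any embedding provider works here
-
-docsearch = OpenSearchVectorSearch.from_texts(
-    ["OpenSearch supports approximate k-NN search."],
-    OpenAIEmbeddings(),
-    opensearch_url="http://localhost:9200",
-)
-docs = docsearch.similarity_search("What kind of search does OpenSearch support?", k=1)
-```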
-
-For a more detailed walkthrough of the OpenSearch wrapper, see [this notebook](/docs/integrations/vectorstores/opensearch)
diff --git a/langchain_md_files/integrations/providers/openweathermap.mdx b/langchain_md_files/integrations/providers/openweathermap.mdx
deleted file mode 100644
index 6e160f805d74a7f0754a46a1249e31b08b2c9fda..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/openweathermap.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
-# OpenWeatherMap
-
->[OpenWeatherMap](https://openweathermap.org/api/) provides all essential weather data for a specific location:
->- Current weather
->- Minute forecast for 1 hour
->- Hourly forecast for 48 hours
->- Daily forecast for 8 days
->- National weather alerts
->- Historical weather data for 40+ years back
-
-This page covers how to use the `OpenWeatherMap API` within LangChain.
-
-## Installation and Setup
-
-- Install requirements with
-```bash
-pip install pyowm
-```
-- Go to OpenWeatherMap and sign up for an account to get your API key [here](https://openweathermap.org/api/)
-- Set your API key as `OPENWEATHERMAP_API_KEY` environment variable
-
-## Wrappers
-
-### Utility
-
-There exists an `OpenWeatherMapAPIWrapper` utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
-```
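-
-A minimal sketch, assuming `OPENWEATHERMAP_API_KEY` is set and `pyowm` is installed (the location string is illustrative):
-
-```python
-from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
-
-weather = OpenWeatherMapAPIWrapper()
-print(weather.run("London,GB"))
-```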
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/openweathermap).
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-
-```python
-from langchain.agents import load_tools
-tools = load_tools(["openweathermap-api"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/oracleai.mdx b/langchain_md_files/integrations/providers/oracleai.mdx
deleted file mode 100644
index 5df9d7eab02461fe5c2b0a715838ea6637b4dcb1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/oracleai.mdx
+++ /dev/null
@@ -1,67 +0,0 @@
-# OracleAI Vector Search
-
-Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads and allows you to query data based on semantics rather than keywords.
-One of the biggest benefits of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system.
-This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.
-
-In addition, your vectors can benefit from all of Oracle Database’s most powerful features, like the following:
-
- * [Partitioning Support](https://www.oracle.com/database/technologies/partitioning.html)
- * [Real Application Clusters scalability](https://www.oracle.com/database/real-application-clusters/)
- * [Exadata smart scans](https://www.oracle.com/database/technologies/exadata/software/smartscan/)
- * [Shard processing across geographically distributed databases](https://www.oracle.com/database/distributed-database/)
- * [Transactions](https://docs.oracle.com/en/database/oracle/oracle-database/23/cncpt/transactions.html)
- * [Parallel SQL](https://docs.oracle.com/en/database/oracle/oracle-database/21/vldbg/parallel-exec-intro.html#GUID-D28717E4-0F77-44F5-BB4E-234C31D4E4BA)
- * [Disaster recovery](https://www.oracle.com/database/data-guard/)
- * [Security](https://www.oracle.com/security/database-security/)
- * [Oracle Machine Learning](https://www.oracle.com/artificial-intelligence/database-machine-learning/)
- * [Oracle Graph Database](https://www.oracle.com/database/integrated-graph-database/)
- * [Oracle Spatial and Graph](https://www.oracle.com/database/spatial/)
- * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)
- * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)
-
-
-## Document Loaders
-
-Please check the [usage example](/docs/integrations/document_loaders/oracleai).
-
-```python
-from langchain_community.document_loaders.oracleai import OracleDocLoader
-```
-
-## Text Splitter
-
-Please check the [usage example](/docs/integrations/document_loaders/oracleai).
-
-```python
-from langchain_community.document_loaders.oracleai import OracleTextSplitter
-```
-
-## Embeddings
-
-Please check the [usage example](/docs/integrations/text_embedding/oracleai).
-
-```python
-from langchain_community.embeddings.oracleai import OracleEmbeddings
-```
-
-## Summary
-
-Please check the [usage example](/docs/integrations/tools/oracleai).
-
-```python
-from langchain_community.utilities.oracleai import OracleSummary
-```
-
-## Vector Store
-
-Please check the [usage example](/docs/integrations/vectorstores/oracle).
-
-```python
-from langchain_community.vectorstores.oraclevs import OracleVS
-```
-
-## End to End Demo
-
-Please check the [Oracle AI Vector Search End-to-End Demo Guide](https://github.com/langchain-ai/langchain/blob/master/cookbook/oracleai_demo.ipynb).
-
diff --git a/langchain_md_files/integrations/providers/outline.mdx b/langchain_md_files/integrations/providers/outline.mdx
deleted file mode 100644
index 44335477ad7e3fbb1c4e6e2c9918869ed9709f51..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/outline.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Outline
-
-> [Outline](https://www.getoutline.com/) is an open-source collaborative knowledge base platform designed for team information sharing.
-
-## Setup
-
-You first need to [create an api key](https://www.getoutline.com/developers#section/Authentication) for your Outline instance. Then you need to set the following environment variables:
-
-```python
-import os
-
-os.environ["OUTLINE_API_KEY"] = "xxx"
-os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
-```
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/outline).
-
-```python
-from langchain.retrievers import OutlineRetriever
-```
diff --git a/langchain_md_files/integrations/providers/outlines.mdx b/langchain_md_files/integrations/providers/outlines.mdx
deleted file mode 100644
index bcae27ec50abb3ce004a3dd7fca145f5f2768030..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/outlines.mdx
+++ /dev/null
@@ -1,201 +0,0 @@
-# Outlines
-
->[Outlines](https://github.com/dottxt-ai/outlines) is a Python library for constrained language generation. It provides a unified interface to various language models and allows for structured generation using techniques like regex matching, type constraints, JSON schemas, and context-free grammars.
-
-Outlines supports multiple backends, including:
-- Hugging Face Transformers
-- llama.cpp
-- vLLM
-- MLX
-
-This integration allows you to use Outlines models with LangChain, providing both LLM and chat model interfaces.
-
-## Installation and Setup
-
-To use Outlines with LangChain, you'll need to install the Outlines library:
-
-```bash
-pip install outlines
-```
-
-Depending on the backend you choose, you may need to install additional dependencies:
-
-- For Transformers: `pip install transformers torch datasets`
-- For llama.cpp: `pip install llama-cpp-python`
-- For vLLM: `pip install vllm`
-- For MLX: `pip install mlx`
-
-## LLM
-
-To use Outlines as an LLM in LangChain, you can use the `Outlines` class:
-
-```python
-from langchain_community.llms import Outlines
-```
-
-## Chat Models
-
-To use Outlines as a chat model in LangChain, you can use the `ChatOutlines` class:
-
-```python
-from langchain_community.chat_models import ChatOutlines
-```
-
-## Model Configuration
-
-Both `Outlines` and `ChatOutlines` classes share similar configuration options:
-
-```python
-model = Outlines(
-    model="meta-llama/Llama-2-7b-chat-hf",  # Model identifier
-    backend="transformers",  # Backend to use (transformers, llamacpp, vllm, or mlxlm)
-    max_tokens=256,  # Maximum number of tokens to generate
-    stop=["\n"],  # Optional list of stop strings
-    streaming=True,  # Whether to stream the output
-    # Additional parameters for structured generation:
-    regex=None,
-    type_constraints=None,
-    json_schema=None,
-    grammar=None,
-    # Additional model parameters:
-    model_kwargs={"temperature": 0.7}
-)
-```
-
-### Model Identifier
-
-The `model` parameter can be:
-- A Hugging Face model name (e.g., "meta-llama/Llama-2-7b-chat-hf")
-- A local path to a model
-- For GGUF models, the format is "repo_id/file_name" (e.g., "TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf")
-
-### Backend Options
-
-The `backend` parameter specifies which backend to use:
-- `"transformers"`: For Hugging Face Transformers models (default)
-- `"llamacpp"`: For GGUF models using llama.cpp
-- `"transformers_vision"`: For vision-language models (e.g., LLaVA)
-- `"vllm"`: For models using the vLLM library
-- `"mlxlm"`: For models using the MLX framework
-
-### Structured Generation
-
-Outlines provides several methods for structured generation:
-
-1. **Regex Matching**:
-   ```python
-   model = Outlines(
-       model="meta-llama/Llama-2-7b-chat-hf",
-       regex=r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)"
-   )
-   ```
-   This will ensure the generated text matches the specified regex pattern (in this case, a valid IP address).
-
-2. **Type Constraints**:
-   ```python
-   model = Outlines(
-       model="meta-llama/Llama-2-7b-chat-hf",
-       type_constraints=int
-   )
-   ```
-   This restricts the output to valid Python types (int, float, bool, datetime.date, datetime.time, datetime.datetime).
-
-3. **JSON Schema**:
-   ```python
-   from pydantic import BaseModel
-
-   class Person(BaseModel):
-       name: str
-       age: int
-
-   model = Outlines(
-       model="meta-llama/Llama-2-7b-chat-hf",
-       json_schema=Person
-   )
-   ```
-   This ensures the generated output adheres to the specified JSON schema or Pydantic model.
-
-4. **Context-Free Grammar**:
-   ```python
-   model = Outlines(
-       model="meta-llama/Llama-2-7b-chat-hf",
-       grammar="""
-           ?start: expression
-           ?expression: term (("+" | "-") term)*
-           ?term: factor (("*" | "/") factor)*
-           ?factor: NUMBER | "-" factor | "(" expression ")"
-           %import common.NUMBER
-       """
-   )
-   ```
-   This generates text that adheres to the specified context-free grammar in EBNF format.
-
-## Usage Examples
-
-### LLM Example
-
-```python
-from langchain_community.llms import Outlines
-
-llm = Outlines(model="meta-llama/Llama-2-7b-chat-hf", max_tokens=100)
-result = llm.invoke("Tell me a short story about a robot.")
-print(result)
-```
-
-### Chat Model Example
-
-```python
-from langchain_community.chat_models import ChatOutlines
-from langchain_core.messages import HumanMessage, SystemMessage
-
-chat = ChatOutlines(model="meta-llama/Llama-2-7b-chat-hf", max_tokens=100)
-messages = [
-    SystemMessage(content="You are a helpful AI assistant."),
-    HumanMessage(content="What's the capital of France?")
-]
-result = chat.invoke(messages)
-print(result.content)
-```
-
-### Streaming Example
-
-```python
-from langchain_community.chat_models import ChatOutlines
-from langchain_core.messages import HumanMessage
-
-chat = ChatOutlines(model="meta-llama/Llama-2-7b-chat-hf", streaming=True)
-for chunk in chat.stream("Tell me a joke about programming."):
-    print(chunk.content, end="", flush=True)
-print()
-```
-
-### Structured Output Example
-
-```python
-from langchain_community.llms import Outlines
-from pydantic import BaseModel
-
-class MovieReview(BaseModel):
-    title: str
-    rating: int
-    summary: str
-
-llm = Outlines(
-    model="meta-llama/Llama-2-7b-chat-hf",
-    json_schema=MovieReview
-)
-result = llm.invoke("Write a short review for the movie 'Inception'.")
-print(result)
-```
-
-## Additional Features
-
-### Tokenizer Access
-
-You can access the underlying tokenizer for the model:
-
-```python
-tokenizer = llm.tokenizer
-encoded = tokenizer.encode("Hello, world!")
-decoded = tokenizer.decode(encoded)
-```
diff --git a/langchain_md_files/integrations/providers/pandas.mdx b/langchain_md_files/integrations/providers/pandas.mdx
deleted file mode 100644
index 15519b0b0f7927536cea800cbee7069551296034..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pandas.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# Pandas
-
->[pandas](https://pandas.pydata.org) is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,
->built on top of the `Python` programming language.
-
-## Installation and Setup
-
-Install the `pandas` package using `pip`:
-
-```bash
-pip install pandas
-```
-
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/pandas_dataframe).
-
-```python
-from langchain_community.document_loaders import DataFrameLoader
-```
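-
-A minimal sketch (the DataFrame contents and column name are illustrative):
-
-```python
-import pandas as pd
-from langchain_community.document_loaders import DataFrameLoader
-
-df = pd.DataFrame({"text": ["hello world", "goodbye world"], "source": ["a", "b"]})
-loader = DataFrameLoader(df, page_content_column="text")
-docs = loader.load()  # each row becomes a Document; remaining columns become metadata
-```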
-
-## Toolkit
-
-See a [usage example](/docs/integrations/tools/pandas).
-
-```python
-from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
-```
diff --git a/langchain_md_files/integrations/providers/payman-tool.mdx b/langchain_md_files/integrations/providers/payman-tool.mdx
deleted file mode 100644
index a8a312d8d21a464d799878f4c541d27bb0b8e7e9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/payman-tool.mdx
+++ /dev/null
@@ -1,114 +0,0 @@
-# PaymanAI
-
-PaymanAI provides functionality to send and receive payments (fiat and crypto) on behalf of an AI Agent. To get started:
-
-1. **Sign up** at app.paymanai.com to create an AI Agent and obtain your **API Key**.
-2. **Set** environment variables (`PAYMAN_API_SECRET` for your API Key, `PAYMAN_ENVIRONMENT` for sandbox or production).
-
-This page gives a quick overview of integrating PaymanAI into LangChain as a tool. For complete reference, see the API documentation.
-
-## Overview
-
-The PaymanAI integration is provided by the `langchain-payman-tool` package. It allows you to:
-
-- Send payments (`send_payment`) to crypto addresses or bank accounts.
-- Search for payees (`search_payees`).
-- Add new payees (`add_payee`).
-- Request money from customers with a hosted checkout link (`ask_for_money`).
-- Check agent or customer balances (`get_balance`).
-
-These can be wrapped as **LangChain Tools** for an LLM-based agent to call them automatically.
-
-### Integration details
-
-| Class | Package | Serializable | JS support | Package latest |
-| :--- | :--- | :---: | :---: | :--- |
-| PaymanAI | `langchain-payman-tool` | ❌ | ❌ | [PyPI Version] |
-
-If you're simply calling the PaymanAI SDK, you can do it directly or via the **Tool** interface in LangChain.
-
-## Setup
-
-1. **Install** the PaymanAI tool package:
-
-```bash
-pip install langchain-payman-tool
-```
-
-2. **Install** the PaymanAI SDK:
-```bash
-pip install paymanai
-```
-
-3. **Set** environment variables:
-```bash
-export PAYMAN_API_SECRET="YOUR_SECRET_KEY"
-export PAYMAN_ENVIRONMENT="sandbox"
-```
-
-Your `PAYMAN_API_SECRET` should be the secret key from app.paymanai.com. The `PAYMAN_ENVIRONMENT` can be `sandbox` or `production` depending on your usage.
-
-## Instantiation
-
-Here is an example of instantiating a PaymanAI tool. If you have multiple Payman methods, you can create multiple tools.
-
-```python
-from langchain_payman_tool.tool import PaymanAI
-
-# Instantiate the PaymanAI tool (example)
-tool = PaymanAI(
-    name="send_payment",
-    description="Send a payment to a specified payee.",
-)
-```
-
-## Invocation
-
-### Invoke directly with args
-
-You can call `tool.invoke(...)` and pass a dictionary matching the tool's expected fields. For example:
-
-```python
-response = tool.invoke({
-    "amount_decimal": 10.00,
-    "payment_destination_id": "abc123",
-    "customer_id": "cust_001",
-    "memo": "Payment for invoice #XYZ"
-})
-```
-
-### Invoke with ToolCall
-
-When used inside an AI workflow, the LLM might produce a `ToolCall` dict. You can simulate it as follows:
-
-```python
-model_generated_tool_call = {
-    "args": {
-        "amount_decimal": 10.00,
-        "payment_destination_id": "abc123"
-    },
-    "id": "1",
-    "name": tool.name,
-    "type": "tool_call",
-}
-tool.invoke(model_generated_tool_call)
-```
-
-## Using the Tool in a Chain or Agent
-
-You can bind a PaymanAI tool to a LangChain agent or chain that supports tool-calling.
-
-## Quick Start Summary
-
-1. **Sign up** at app.paymanai.com to get your **API Key**.
-2. **Install** dependencies:
-   ```bash
-   pip install paymanai langchain-payman-tool
-   ```
-3. **Export** environment variables:
-   ```bash
-   export PAYMAN_API_SECRET="YOUR_SECRET_KEY"
-   export PAYMAN_ENVIRONMENT="sandbox"
-   ```
-4. **Instantiate** a PaymanAI tool, passing your desired name/description.
-5. **Call** the tool with `.invoke(...)` or integrate it into a chain or agent.
diff --git a/langchain_md_files/integrations/providers/permit.mdx b/langchain_md_files/integrations/providers/permit.mdx
deleted file mode 100644
index ff1cb10537737b486e9e70c97c946ab26463b4db..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/permit.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# Permit
-
-[Permit.io](https://permit.io/) offers fine-grained access control and policy
-enforcement. With LangChain, you can integrate Permit checks to ensure only authorized
-users can access or retrieve data in your LLM applications.
-
-## Installation and Setup
-
-```bash
-pip install langchain-permit
-pip install permit
-```
-
-Set environment variables for your Permit PDP and credentials:
-
-```bash
-export PERMIT_API_KEY="your_permit_api_key"
-export PERMIT_PDP_URL="http://localhost:7766"   # or your real PDP endpoint
-```
-
-Make sure your PDP is running and configured. See
-[Permit Docs](https://docs.permit.io/sdk/python/quickstart-python/#2-setup-your-pdp-policy-decision-point-container)
-for policy setup.
-
-## Tools
-
-See detail on available tools [here](/docs/integrations/tools/permit).
-
-## Retrievers
-
-See detail on available retrievers [here](/docs/integrations/retrievers/permit).
diff --git a/langchain_md_files/integrations/providers/perplexity.mdx b/langchain_md_files/integrations/providers/perplexity.mdx
deleted file mode 100644
index 9e89994f54d101cb40e5e1de952a7266840f7ac5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/perplexity.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Perplexity
-
->[Perplexity](https://www.perplexity.ai/pro) is the most powerful way to search 
-> the internet with unlimited Pro Search, upgraded AI models, unlimited file upload, 
-> image generation, and API credits.
->
-> You can check a [list of available models](https://docs.perplexity.ai/docs/model-cards).
-
-## Installation and Setup
-
-Install a Python package:
-
-```bash
-pip install openai
-```
-
-Get your API key from [here](https://docs.perplexity.ai/docs/getting-started).
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/perplexity).
-
-```python
-from langchain_community.chat_models import ChatPerplexity
-```
diff --git a/langchain_md_files/integrations/providers/petals.mdx b/langchain_md_files/integrations/providers/petals.mdx
deleted file mode 100644
index db85c3cfc80e6a0441cf967ac64c26e6bb593f01..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/petals.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Petals
-
-This page covers how to use the Petals ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Petals wrappers.
-
-## Installation and Setup
-- Install with `pip install petals`
-- Get a Hugging Face api key and set it as an environment variable (`HUGGINGFACE_API_KEY`)
-
-## Wrappers
-
-### LLM
-
-There exists a Petals LLM wrapper, which you can access with 
-```python
-from langchain_community.llms import Petals
-```
diff --git a/langchain_md_files/integrations/providers/pg_embedding.mdx b/langchain_md_files/integrations/providers/pg_embedding.mdx
deleted file mode 100644
index 9bcd05bd27cd589ad9895b4fe77c4ab380a02f17..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pg_embedding.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Postgres Embedding
-
-> [pg_embedding](https://github.com/neondatabase/pg_embedding) is an open-source package for
-> vector similarity search using `Postgres` and the `Hierarchical Navigable Small Worlds`
-> algorithm for approximate nearest neighbor search.
-
-## Installation and Setup
-
-We need to install the `psycopg2-binary` Python package.
-
-```bash
-pip install psycopg2-binary
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/pgembedding).
-
-```python
-from langchain_community.vectorstores import PGEmbedding
-```
-
diff --git a/langchain_md_files/integrations/providers/pgvector.mdx b/langchain_md_files/integrations/providers/pgvector.mdx
deleted file mode 100644
index c98aaea19a98d876055276e21874a190adccf30b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pgvector.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# PGVector
-
-This page covers how to use the Postgres [PGVector](https://github.com/pgvector/pgvector) ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific PGVector wrappers.
-
-## Installation
-- Install the Python package with `pip install pgvector`
-
-
-## Setup
-1. The first step is to create a database with the `pgvector` extension installed.
-
-    Follow the steps at [PGVector Installation Steps](https://github.com/pgvector/pgvector#installation) to install the database and the extension. The docker image is the easiest way to get started.
-
-## Wrappers
-
-### VectorStore
-
-There exists a wrapper around Postgres vector databases, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-```python
-from langchain_community.vectorstores.pgvector import PGVector
-```
-
-### Usage
-
-For a more detailed walkthrough of the PGVector Wrapper, see [this notebook](/docs/integrations/vectorstores/pgvector)
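-
-A minimal sketch of creating a store from texts (the connection string and collection name are illustrative, and `FakeEmbeddings` stands in for a real embedding model):
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores.pgvector import PGVector
-
-# Connection string and collection name are illustrative.
-store = PGVector.from_texts(
-    texts=["PGVector stores embeddings in Postgres."],
-    embedding=FakeEmbeddings(size=100),
-    collection_name="demo",
-    connection_string="postgresql+psycopg2://user:pass@localhost:5432/postgres",
-)
-print(store.similarity_search("embeddings", k=1))
-```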
diff --git a/langchain_md_files/integrations/providers/pinecone.mdx b/langchain_md_files/integrations/providers/pinecone.mdx
deleted file mode 100644
index c9c84822034735a0db0a5a387ccad151e7e10a69..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pinecone.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-keywords: [pinecone]
----
-
-# Pinecone
-
->[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.
-
-
-## Installation and Setup
-
-Install the Python SDK:
-
-```bash
-pip install langchain-pinecone
-```
-
-
-## Vector store
-
-There exists a wrapper around Pinecone indexes, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-```python
-from langchain_pinecone import PineconeVectorStore
-```
-
-For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](/docs/integrations/vectorstores/pinecone)
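-
-A minimal sketch (assumes `PINECONE_API_KEY` and `OPENAI_API_KEY` are set, the `langchain-openai` package is installed, and an index named `my-index` already exists with a dimension matching the embedding model):
-
-```python
-from langchain_openai import OpenAIEmbeddings
-from langchain_pinecone import PineconeVectorStore
-
-# The index name is illustrative; the index must already exist in Pinecone.
-vectorstore = PineconeVectorStore(index_name="my-index", embedding=OpenAIEmbeddings())
-vectorstore.add_texts(["LangChain integrates with Pinecone."])
-print(vectorstore.similarity_search("Pinecone", k=1))
-```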
-
-## Retrievers
-
-### Pinecone Hybrid Search
-
-```bash
-pip install pinecone pinecone-text
-```
-
-```python
-from langchain_community.retrievers import (
-    PineconeHybridSearchRetriever,
-)
-```
-
-For more detailed information, see [this notebook](/docs/integrations/retrievers/pinecone_hybrid_search).
-
-
-### Self Query retriever
-
-Pinecone vector store can be used as a retriever for self-querying.
-
-For more detailed information, see [this notebook](/docs/integrations/retrievers/self_query/pinecone).
diff --git a/langchain_md_files/integrations/providers/pipelineai.mdx b/langchain_md_files/integrations/providers/pipelineai.mdx
deleted file mode 100644
index e13f6cffc5cd32f46f86ad71903e0b291ba1f66e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pipelineai.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# PipelineAI
-
-This page covers how to use the PipelineAI ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific PipelineAI wrappers.
-
-## Installation and Setup
-
-- Install with `pip install pipeline-ai`
-- Get a Pipeline Cloud api key and set it as an environment variable (`PIPELINE_API_KEY`)
-
-## Wrappers
-
-### LLM
-
-There exists a PipelineAI LLM wrapper, which you can access with
-
-```python
-from langchain_community.llms import PipelineAI
-```
diff --git a/langchain_md_files/integrations/providers/predictionguard.mdx b/langchain_md_files/integrations/providers/predictionguard.mdx
deleted file mode 100644
index bd7eea8330b7c9a7a748e8bfc28a9a8643372339..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/predictionguard.mdx
+++ /dev/null
@@ -1,81 +0,0 @@
-# Prediction Guard
-
-This page covers how to use the Prediction Guard ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Prediction Guard wrappers.
-
-This integration is maintained in the [langchain-predictionguard](https://github.com/predictionguard/langchain-predictionguard)
-package.
-
-## Installation and Setup
-
-- Install the Prediction Guard LangChain partner package:
-```bash
-pip install langchain-predictionguard
-```
-
-- Get a Prediction Guard API key (as described [here](https://docs.predictionguard.com/)) and set it as an environment variable (`PREDICTIONGUARD_API_KEY`)
-
-## Prediction Guard LangChain Integrations
-|API|Description|Endpoint Docs| Import                                                  | Example Usage                                                                 |
-|---|---|---|---------------------------------------------------------|-------------------------------------------------------------------------------|
-|Chat|Build Chat Bots|[Chat](https://docs.predictionguard.com/api-reference/api-reference/chat-completions)| `from langchain_predictionguard import ChatPredictionGuard` | [ChatPredictionGuard.ipynb](/docs/integrations/chat/predictionguard)             |
-|Completions|Generate Text|[Completions](https://docs.predictionguard.com/api-reference/api-reference/completions)| `from langchain_predictionguard import PredictionGuard` | [PredictionGuard.ipynb](/docs/integrations/llms/predictionguard)                     |
-|Text Embedding|Embed Strings to Vectors|[Embeddings](https://docs.predictionguard.com/api-reference/api-reference/embeddings)| `from langchain_predictionguard import PredictionGuardEmbeddings` | [PredictionGuardEmbeddings.ipynb](/docs/integrations/text_embedding/predictionguard) |
-
-## Getting Started
-
-## Chat Models
-
-### Prediction Guard Chat
-
-See a [usage example](/docs/integrations/chat/predictionguard)
-
-```python
-from langchain_predictionguard import ChatPredictionGuard
-```
-
-#### Usage
-
-```python
-# If predictionguard_api_key is not passed, default behavior is to use the `PREDICTIONGUARD_API_KEY` environment variable.
-chat = ChatPredictionGuard(model="Hermes-3-Llama-3.1-8B")
-
-chat.invoke("Tell me a joke")
-```
-
-## Embedding Models
-
-### Prediction Guard Embeddings
-
-See a [usage example](/docs/integrations/text_embedding/predictionguard)
-
-```python
-from langchain_predictionguard import PredictionGuardEmbeddings
-```
-
-#### Usage
-```python
-# If predictionguard_api_key is not passed, default behavior is to use the `PREDICTIONGUARD_API_KEY` environment variable.
-embeddings = PredictionGuardEmbeddings(model="bridgetower-large-itm-mlm-itc")
-
-text = "This is an embedding example."
-output = embeddings.embed_query(text)
-```
-
-## LLMs
-
-### Prediction Guard LLM
-
-See a [usage example](/docs/integrations/llms/predictionguard)
-
-```python
-from langchain_predictionguard import PredictionGuard
-```
-
-#### Usage
-```python
-# If predictionguard_api_key is not passed, default behavior is to use the `PREDICTIONGUARD_API_KEY` environment variable.
-llm = PredictionGuard(model="Hermes-2-Pro-Llama-3-8B")
-
-llm.invoke("Tell me a joke about bears")
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/promptlayer.mdx b/langchain_md_files/integrations/providers/promptlayer.mdx
deleted file mode 100644
index 550ff28f35b65e64ed603ecb6da7415f64132d69..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/promptlayer.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
-# PromptLayer
-
->[PromptLayer](https://docs.promptlayer.com/introduction) is a platform for prompt engineering. 
-> It also helps with LLM observability: visualizing requests, versioning prompts, and tracking usage.
->
->While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g. 
-> [`PromptLayerOpenAI`](https://docs.promptlayer.com/languages/langchain)), 
-> using a callback is the recommended way to integrate `PromptLayer` with LangChain.
-
-## Installation and Setup
-
-To work with `PromptLayer`, we have to:
-- Create a `PromptLayer` account
-- Create an api token and set it as an environment variable (`PROMPTLAYER_API_KEY`)
-
-Install a Python package:
-
-```bash
-pip install promptlayer
-```
-
-
-## Callback
-
-See a [usage example](/docs/integrations/callbacks/promptlayer).
-
-```python
-import promptlayer  # Don't forget this import!
-from langchain.callbacks import PromptLayerCallbackHandler
-```
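-
-A minimal sketch of attaching the callback to a model (assumes `PROMPTLAYER_API_KEY` and `OPENAI_API_KEY` are set and `langchain-openai` is installed; the tag is illustrative):
-
-```python
-import promptlayer  # Don't forget this import!
-from langchain.callbacks import PromptLayerCallbackHandler
-from langchain_openai import ChatOpenAI
-
-# Requests made by this model are logged to PromptLayer under the given tag.
-llm = ChatOpenAI(callbacks=[PromptLayerCallbackHandler(pl_tags=["langchain-demo"])])
-llm.invoke("Tell me a joke")
-```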
-
-
-## LLM
-
-See a [usage example](/docs/integrations/llms/promptlayer_openai).
-
-```python
-from langchain_community.llms import PromptLayerOpenAI
-```
-
-
-## Chat Models
-
-See a [usage example](/docs/integrations/chat/promptlayer_chatopenai).
-
-```python
-from langchain_community.chat_models import PromptLayerChatOpenAI
-```
-
diff --git a/langchain_md_files/integrations/providers/psychic.mdx b/langchain_md_files/integrations/providers/psychic.mdx
deleted file mode 100644
index a415f8a5a484670dbdfa05d9b726c95b92a130c8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/psychic.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-sidebar_class_name: hidden
----
-
-# Psychic
-
-:::warning
-This provider is no longer maintained, and may not work. Use with caution.
-:::
-
->[Psychic](https://www.psychic.dev/) is a platform for integrating with SaaS tools like `Notion`, `Zendesk`, 
-> `Confluence`, and `Google Drive` via OAuth and syncing documents from these applications to your SQL or vector
-> database. You can think of it like Plaid for unstructured data. 
-
-## Installation and Setup
-
-```bash
-pip install psychicapi
-```
-
-Psychic is easy to set up - you import the `react` library and configure it with your `Sidekick API` key, which you get 
-from the [Psychic dashboard](https://dashboard.psychic.dev/). When you connect the applications, you can 
-view these connections from the dashboard and retrieve data using the server-side libraries.
- 
-1. Create an account in the [dashboard](https://dashboard.psychic.dev/).
-2. Use the [react library](https://docs.psychic.dev/sidekick-link) to add the Psychic link modal to your frontend react app. You will use this to connect the SaaS apps.
-3. Once you have created a connection, you can use the `PsychicLoader` by following the [example notebook](/docs/integrations/document_loaders/psychic)
-
-
-## Advantages vs Other Document Loaders
-
-1.	**Universal API:** Instead of building OAuth flows and learning the APIs for every SaaS app, you integrate Psychic once and leverage our universal API to retrieve data.
-2.	**Data Syncs:** Data in your customers' SaaS apps can get stale fast. With Psychic you can configure webhooks to keep your documents up to date on a daily or realtime basis.
-3.	**Simplified OAuth:** Psychic handles OAuth end-to-end so that you don't have to spend time creating OAuth clients for each integration, keeping access tokens fresh, and handling OAuth redirect logic.
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/pull-md.mdx b/langchain_md_files/integrations/providers/pull-md.mdx
deleted file mode 100644
index b7384a3eda4773201284079d7484f46562bd6f98..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pull-md.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# PullMd Loader
-
->[PullMd](https://pull.md/) is a service that converts web pages into Markdown format. The `langchain-pull-md` package utilizes this service to convert URLs, especially those rendered with JavaScript frameworks like React, Angular, or Vue.js, into Markdown without the need for local rendering.
-
-## Installation and Setup
-
-To get started with `langchain-pull-md`, you need to install the package via pip:
-
-```bash
-pip install langchain-pull-md
-```
-
-See the [usage example](/docs/integrations/document_loaders/pull_md) for detailed integration and usage instructions.
-
-## Document Loader
-
-The `PullMdLoader` class in `langchain-pull-md` provides an easy way to convert URLs to Markdown. It's particularly useful for loading content from modern web applications for use within LangChain's processing capabilities.
-
-```python
-from langchain_pull_md import PullMdLoader
-
-# Initialize the loader with a URL of a JavaScript-rendered webpage
-loader = PullMdLoader(url='https://example.com')
-
-# Load the content as a Document
-documents = loader.load()
-
-# Access the Markdown content
-for document in documents:
-    print(document.page_content)
-```
-
-This loader supports any URL and is particularly adept at handling sites built with dynamic JavaScript, making it a versatile tool for markdown extraction in data processing workflows.
-
-## API Reference
-
-For a comprehensive guide to all available functions and their parameters, visit the [API reference](https://github.com/chigwell/langchain-pull-md).
-
-## Additional Resources
-
-- [GitHub Repository](https://github.com/chigwell/langchain-pull-md)
-- [PyPi Package](https://pypi.org/project/langchain-pull-md/)
diff --git a/langchain_md_files/integrations/providers/pygmalionai.mdx b/langchain_md_files/integrations/providers/pygmalionai.mdx
deleted file mode 100644
index 2d98fdf38c0b62b44eecc38d46f2b5fc337f719e..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/pygmalionai.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# PygmalionAI
-
->[PygmalionAI](https://pygmalion.chat/) is a company supporting 
-> open-source models by serving the inference endpoint 
-> for the [Aphrodite Engine](https://github.com/PygmalionAI/aphrodite-engine).
-
-
-## Installation and Setup
-
-
-```bash
-pip install aphrodite-engine
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/aphrodite).
-
-```python
-from langchain_community.llms import Aphrodite
-```
diff --git a/langchain_md_files/integrations/providers/qdrant.mdx b/langchain_md_files/integrations/providers/qdrant.mdx
deleted file mode 100644
index ad047ce07212610824f1d4314b70886e0e8b09ff..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/qdrant.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
-# Qdrant
-
->[Qdrant](https://qdrant.tech/documentation/) (read: quadrant) is a vector similarity search engine. 
-> It provides a production-ready service with a convenient API to store, search, and manage 
-> points - vectors with an additional payload. `Qdrant` is tailored to extended filtering support.
-
-
-## Installation and Setup
-
-Install the Python partner package:
-
-```bash
-pip install langchain-qdrant
-```
-
-## Embedding models
-
-### FastEmbedSparse
-
-```python
-from langchain_qdrant import FastEmbedSparse
-```
-
-### SparseEmbeddings
-
-```python
-from langchain_qdrant import SparseEmbeddings
-```
-
-## Vector Store
-
-There exists a wrapper around `Qdrant` indexes, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-```python
-from langchain_qdrant import QdrantVectorStore
-```
-
-For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](/docs/integrations/vectorstores/qdrant)
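-
-A minimal sketch using an in-memory Qdrant instance (the collection name is illustrative, and `FakeEmbeddings` stands in for a real embedding model):
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_qdrant import QdrantVectorStore
-
-# ":memory:" spins up an ephemeral Qdrant instance; the collection name is illustrative.
-store = QdrantVectorStore.from_texts(
-    ["Qdrant is a vector similarity search engine."],
-    embedding=FakeEmbeddings(size=1536),
-    location=":memory:",
-    collection_name="demo",
-)
-print(store.similarity_search("vector search", k=1))
-```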
diff --git a/langchain_md_files/integrations/providers/rank_bm25.mdx b/langchain_md_files/integrations/providers/rank_bm25.mdx
deleted file mode 100644
index 0deffeec23ed4a308b8fa539978760651993945f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/rank_bm25.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# rank_bm25
-
-[rank_bm25](https://github.com/dorianbrown/rank_bm25) is an open-source collection of algorithms
-designed to query documents and return the most relevant ones, commonly used for creating
-search engines.
-
-See its [project page](https://github.com/dorianbrown/rank_bm25) for available algorithms.
-
-
-## Installation and Setup
-
-First, you need to install the `rank_bm25` Python package.
-
-```bash
-pip install rank_bm25
-```
-
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/bm25).
-
-```python
-from langchain_community.retrievers import BM25Retriever
-```
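-
-A minimal sketch (the documents are illustrative):
-
-```python
-from langchain_community.retrievers import BM25Retriever
-
-# Build an in-memory BM25 index over a few example texts.
-retriever = BM25Retriever.from_texts(
-    ["BM25 is a ranking function.", "Vector search uses embeddings."]
-)
-print(retriever.invoke("ranking function"))
-```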
diff --git a/langchain_md_files/integrations/providers/reddit.mdx b/langchain_md_files/integrations/providers/reddit.mdx
deleted file mode 100644
index 5e806075513cdb04087a03fbfa0d8ba957fbfc5d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/reddit.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Reddit
-
->[Reddit](https://www.reddit.com) is an American social news aggregation, content rating, and discussion website.
-
-## Installation and Setup
-
-First, you need to install the `praw` Python package.
-
-```bash
-pip install praw
-```
-
-Make a [Reddit Application](https://www.reddit.com/prefs/apps/) and initialize the loader with your Reddit API credentials.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/reddit).
-
-
-```python
-from langchain_community.document_loaders import RedditPostsLoader
-```
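-
-A minimal sketch (the credentials, subreddit, and limits below are illustrative placeholders):
-
-```python
-from langchain_community.document_loaders import RedditPostsLoader
-
-# Credentials come from your Reddit application; the values here are placeholders.
-loader = RedditPostsLoader(
-    client_id="YOUR_CLIENT_ID",
-    client_secret="YOUR_CLIENT_SECRET",
-    user_agent="extractor by u/your_username",
-    categories=["new"],
-    mode="subreddit",
-    search_queries=["langchain"],
-    number_posts=5,
-)
-docs = loader.load()
-```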
diff --git a/langchain_md_files/integrations/providers/redis.mdx b/langchain_md_files/integrations/providers/redis.mdx
deleted file mode 100644
index 1850ca312e89d7ef11128638f3ada52598c87483..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/redis.mdx
+++ /dev/null
@@ -1,138 +0,0 @@
-# Redis
-
->[Redis (Remote Dictionary Server)](https://en.wikipedia.org/wiki/Redis) is an open-source in-memory storage, 
-> used as a distributed, in-memory key–value database, cache and message broker, with optional durability. 
-> Because it holds all data in memory and because of its design, `Redis` offers low-latency reads and writes, 
-> making it particularly suitable for use cases that require a cache. Redis is the most popular NoSQL database, 
-> and one of the most popular databases overall.
-
-This page covers how to use the [Redis](https://redis.com) ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Redis wrappers.
-
-## Installation and Setup
-
-Install the Python SDK:
-
-```bash
-pip install redis
-```
-
-To run Redis locally, you can use Docker:
-
-```bash
-docker run --name langchain-redis -d -p 6379:6379 redis redis-server --save 60 1 --loglevel warning
-```
-
-To stop the container:
-
-```bash
-docker stop langchain-redis
-```
-
-And to start it again:
-
-```bash
-docker start langchain-redis
-```
-
-### Connections
-
-We need a Redis connection URL string to connect to the database. The connection supports either a standalone Redis server
-or a high-availability setup with replication and Redis Sentinels.
-
-#### Redis Standalone connection url
-For a standalone `Redis` server, the official Redis connection URL formats can be used, as described in the Python Redis module's
-`from_url()` method [Redis.from_url](https://redis-py.readthedocs.io/en/stable/connections.html#redis.Redis.from_url)
-
-Example: `redis_url = "redis://:secret-pass@localhost:6379/0"`
-
-#### Redis Sentinel connection url
-
-For [Redis Sentinel setups](https://redis.io/docs/management/sentinel/) the connection scheme is "redis+sentinel". 
-This is an unofficial extension of the official IANA-registered protocol schemes, since no official connection URL
-scheme for Sentinels is available.
-
-Example: `redis_url = "redis+sentinel://:secret-pass@sentinel-host:26379/mymaster/0"`
-
-The format is `redis+sentinel://[[username]:[password]]@[host-or-ip]:[port]/[service-name]/[db-number]`
-with the default values of "service-name = mymaster" and "db-number = 0" if not set explicitly.
-The service-name is the Redis server monitoring group name as configured within the Sentinel. 
-
-The current URL format limits the connection string to a single Sentinel host (no list can be given), and
-both the Redis server and the Sentinel must have the same password set (if one is used).
-
-#### Redis Cluster connection url
-
-Redis Cluster is currently not supported by methods that require a `redis_url` parameter.
-The only way to use a Redis Cluster is with LangChain classes that accept a preconfigured Redis client, such as `RedisCache`
-(example below).
-
-## Cache
-
-The Cache wrapper allows for [Redis](https://redis.io) to be used as a remote, low-latency, in-memory cache for LLM prompts and responses.
-
-### Standard Cache
-The standard cache is the bread and butter of Redis use cases in production for both [open-source](https://redis.io) and [enterprise](https://redis.com) users globally.
-
-```python
-from langchain.cache import RedisCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain.globals import set_llm_cache
-import redis
-
-redis_client = redis.Redis.from_url(...)
-set_llm_cache(RedisCache(redis_client))
-```
-
-### Semantic Cache
-Semantic caching allows users to retrieve cached prompts based on semantic similarity between the user input and previously cached results. Under the hood it blends Redis as both a cache and a vectorstore.
-
-```python
-from langchain.cache import RedisSemanticCache
-```
-
-To use this cache with your LLMs:
-```python
-from langchain.globals import set_llm_cache
-import redis
-
-# use any embedding provider...
-from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
-
-redis_url = "redis://localhost:6379"
-
-set_llm_cache(RedisSemanticCache(
-    embedding=FakeEmbeddings(),
-    redis_url=redis_url
-))
-```
-
-## VectorStore
-
-The vectorstore wrapper turns Redis into a low-latency [vector database](https://redis.com/solutions/use-cases/vector-database/) for semantic search or LLM content retrieval.
-
-```python
-from langchain_community.vectorstores import Redis
-```
-
-For a more detailed walkthrough of the Redis vectorstore wrapper, see [this notebook](/docs/integrations/vectorstores/redis).
-
-## Retriever
-
-The Redis vector store retriever wrapper generalizes the vectorstore class to perform 
-low-latency document retrieval. To create the retriever, simply 
-call `.as_retriever()` on the base vectorstore class.
-
-## Memory
-
-Redis can be used to persist LLM conversations.
-
-### Vector Store Retriever Memory
-
-For a more detailed walkthrough of the `VectorStoreRetrieverMemory` wrapper, see [this notebook](https://python.langchain.com/api_reference/langchain/memory/langchain.memory.vectorstore.VectorStoreRetrieverMemory.html).
-
-### Chat Message History Memory
-For a detailed example of Redis to cache conversation message history, see [this notebook](/docs/integrations/memory/redis_chat_message_history).
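-
-A minimal sketch of persisting a conversation (the session id and URL are illustrative):
-
-```python
-from langchain_community.chat_message_histories import RedisChatMessageHistory
-
-# Session id and Redis URL are illustrative.
-history = RedisChatMessageHistory(session_id="demo-session", url="redis://localhost:6379/0")
-history.add_user_message("hi!")
-history.add_ai_message("hello, how can I help?")
-print(history.messages)
-```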
diff --git a/langchain_md_files/integrations/providers/remembrall.mdx b/langchain_md_files/integrations/providers/remembrall.mdx
deleted file mode 100644
index 822acab815ad047d17c6d9be44bc805a12000bf9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/remembrall.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Remembrall
-
->[Remembrall](https://remembrall.dev/) is a platform that gives a language model 
-> long-term memory, retrieval augmented generation, and complete observability.
- 
-## Installation and Setup
-
-To get started, [sign in with Github on the Remembrall platform](https://remembrall.dev/login) 
-and copy your [API key from the settings page](https://remembrall.dev/dashboard/settings).
-
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/remembrall).
-
diff --git a/langchain_md_files/integrations/providers/replicate.mdx b/langchain_md_files/integrations/providers/replicate.mdx
deleted file mode 100644
index 21bd1925ddf6d1ff85ae914212d48dad8877fb72..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/replicate.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
-# Replicate
-This page covers how to run models on Replicate within LangChain.
-
-## Installation and Setup
-- Create a [Replicate](https://replicate.com) account. Get your API key and set it as an environment variable (`REPLICATE_API_TOKEN`)
-- Install the [Replicate python client](https://github.com/replicate/replicate-python) with `pip install replicate`
-
-## Calling a model
-
-Find a model on the [Replicate explore page](https://replicate.com/explore), and then paste in the model name and version in this format: `owner-name/model-name:version`
-
-For example, for this [dolly model](https://replicate.com/replicate/dolly-v2-12b), click on the API tab. The model name/version would be: `"replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"`
-
-Only the `model` param is required, but any other model parameters can also be passed in with the format `input={model_param: value, ...}`
-
-
-For example, if we were running stable diffusion and wanted to change the image dimensions:
-
-```python
-Replicate(model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf", input={'image_dimensions': '512x512'})
-```
-
-*Note that only the first output of a model will be returned.*
-From here, we can initialize our model:
-
-```python
-llm = Replicate(model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5")
-```
-
-And run it:
-
-```python
-prompt = """
-Answer the following yes/no question by reasoning step by step.
-Can a dog drive a car?
-"""
-llm(prompt)
-```
-
-We can call any Replicate model (not just LLMs) using this syntax. For example, we can call [Stable Diffusion](https://replicate.com/stability-ai/stable-diffusion):
-
-```python
-text2image = Replicate(model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf", input={'image_dimensions':'512x512'})
-
-image_output = text2image("A cat riding a motorcycle by Picasso")
-```
diff --git a/langchain_md_files/integrations/providers/roam.mdx b/langchain_md_files/integrations/providers/roam.mdx
deleted file mode 100644
index 322ade8d29aa390d54ddff211e883ab49e40f58b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/roam.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Roam
-
->[ROAM](https://roamresearch.com/) is a note-taking tool for networked thought, designed to create a personal knowledge base.
- 
-## Installation and Setup
-
-There isn't any special setup for it.
-
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/roam).
-
-```python
-from langchain_community.document_loaders import RoamLoader
-```
diff --git a/langchain_md_files/integrations/providers/robocorp.mdx b/langchain_md_files/integrations/providers/robocorp.mdx
deleted file mode 100644
index edafdf75dd27fa14067742d8882d97e64657a626..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/robocorp.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# Sema4 (fka Robocorp)
-
->[Robocorp](https://robocorp.com/) helps build and operate Python workers that run seamlessly anywhere at any scale
-
-
-## Installation and Setup
-
-You need to install `langchain-robocorp` python package:
-
-```bash
-pip install langchain-robocorp
-```
-
-You will need a running instance of `Action Server` for your agent application to communicate with. 
-See the [Robocorp Quickstart](https://github.com/robocorp/robocorp#quickstart) on how to set up Action Server and create your Actions.
-
-You can bootstrap a new project using the Action Server `new` command.
-
-```bash
-action-server new
-cd ./your-project-name
-action-server start
-```
-
-## Tool
-
-```python
-from langchain_robocorp.toolkits import ActionServerRequestTool
-```
-
-## Toolkit
-
-See a [usage example](/docs/integrations/tools/robocorp).
-
-```python
-from langchain_robocorp import ActionServerToolkit
-```
diff --git a/langchain_md_files/integrations/providers/rockset.mdx b/langchain_md_files/integrations/providers/rockset.mdx
deleted file mode 100644
index 735c2181783fafcd389fa68190369e0d112d25e5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/rockset.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Rockset
-
->[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters. 
-
-## Installation and Setup
-
-Make sure you have a Rockset account and go to the web console to get the API key. Details can be found on [the website](https://rockset.com/docs/rest-api/).
-
-```bash
-pip install rockset
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/rockset).
-
-```python
-from langchain_community.vectorstores import Rockset 
-```
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/rockset).
-```python
-from langchain_community.document_loaders import RocksetLoader
-```
-
-## Chat Message History
-
-See a [usage example](/docs/integrations/memory/rockset_chat_message_history).
-```python
-from langchain_community.chat_message_histories import RocksetChatMessageHistory
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/runhouse.mdx b/langchain_md_files/integrations/providers/runhouse.mdx
deleted file mode 100644
index d0b63ed4905738311964f9ee3196aacef2b6c4f5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/runhouse.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# Runhouse
-
-This page covers how to use the [Runhouse](https://github.com/run-house/runhouse) ecosystem within LangChain.
-It is broken into three parts: installation and setup, LLMs, and Embeddings.
-
-## Installation and Setup
-- Install the Python SDK with `pip install runhouse`
-- If you'd like to use an on-demand cluster, check your cloud credentials with `sky check`
-
-## Self-hosted LLMs
-For a basic self-hosted LLM, you can use the `SelfHostedHuggingFaceLLM` class. For more
-custom LLMs, you can use the `SelfHostedPipeline` parent class.
-
-```python
-from langchain_community.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM
-```
-
-For a more detailed walkthrough of the Self-hosted LLMs, see [this notebook](/docs/integrations/llms/runhouse)
-
-## Self-hosted Embeddings
-There are several ways to use self-hosted embeddings with LangChain via Runhouse.
-
-For a basic self-hosted embedding from a Hugging Face Transformers model, you can use 
-the `SelfHostedHuggingFaceEmbeddings` class.
-```python
-from langchain_community.embeddings import SelfHostedEmbeddings, SelfHostedHuggingFaceEmbeddings
-```
-
-For a more detailed walkthrough of the Self-hosted Embeddings, see [this notebook](/docs/integrations/text_embedding/self-hosted)
diff --git a/langchain_md_files/integrations/providers/rwkv.mdx b/langchain_md_files/integrations/providers/rwkv.mdx
deleted file mode 100644
index 90a795a420865b8fee15919486e4c30bf4452e28..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/rwkv.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
-# RWKV-4
-
-This page covers how to use the `RWKV-4` wrapper within LangChain.
-It is broken into two parts: installation and setup, and then usage with an example.
-
-## Installation and Setup
-- Install the Python package with `pip install rwkv`
-- Install the tokenizers Python package with `pip install tokenizers`
-- Download a [RWKV model](https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main) and place it in your desired directory
-- Download the [tokens file](https://raw.githubusercontent.com/BlinkDL/ChatRWKV/main/20B_tokenizer.json)
-
-## Usage
-
-### RWKV
-
-To use the RWKV wrapper, you need to provide the path to the pre-trained model file and the tokenizer's configuration.
-```python
-from langchain_community.llms import RWKV
-
-# Test the model
-
-def generate_prompt(instruction, input=None):
-    if input:
-        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-
-# Instruction:
-{instruction}
-
-# Input:
-{input}
-
-# Response:
-"""
-    else:
-        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-# Instruction:
-{instruction}
-
-# Response:
-"""
-
-
-model = RWKV(model="./models/RWKV-4-Raven-3B-v7-Eng-20230404-ctx4096.pth", strategy="cpu fp32", tokens_path="./rwkv/20B_tokenizer.json")
-response = model.invoke(generate_prompt("Once upon a time, "))
-```
-## Model File
-
-You can find links to model file downloads at the [RWKV-4-Raven](https://huggingface.co/BlinkDL/rwkv-4-raven/tree/main) repository.
-
-### Rwkv-4 models -> recommended VRAM
-
-
-| Model | 8bit  | bf16/fp16 | fp32  |
-|-------|-------|-----------|-------|
-| 14B   | 16GB  | 28GB      | >50GB |
-| 7B    | 8GB   | 14GB      | 28GB  |
-| 3B    | 2.8GB | 6GB       | 12GB  |
-| 1b5   | 1.3GB | 3GB       | 6GB   |
-
-See the [rwkv pip](https://pypi.org/project/rwkv/) page for more information about strategies, including streaming and cuda support.
diff --git a/langchain_md_files/integrations/providers/salesforce.mdx b/langchain_md_files/integrations/providers/salesforce.mdx
deleted file mode 100644
index ea27f204f71b22c388b482caa9557335b1031b47..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/salesforce.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Salesforce
-
-[Salesforce](https://www.salesforce.com/) is a cloud-based software company that
-provides customer relationship management (CRM) solutions and a suite of enterprise
-applications focused on sales, customer service, marketing automation, and analytics.
-
-[langchain-salesforce](https://pypi.org/project/langchain-salesforce/) implements
-tools enabling LLMs to interact with Salesforce data.
-
-
-## Installation and Setup
-
-```bash
-pip install langchain-salesforce
-```
-
-## Tools
-
-See detail on available tools [here](/docs/integrations/tools/salesforce/).
diff --git a/langchain_md_files/integrations/providers/salute_devices.mdx b/langchain_md_files/integrations/providers/salute_devices.mdx
deleted file mode 100644
index b0566e9d76d89c1b14bd477b9be7c11e5412483b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/salute_devices.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-# Salute Devices
-
-Salute Devices provides the GigaChat family of LLMs.
-
-For more information on how to get access to GigaChat, [follow this link](https://developers.sber.ru/docs/ru/gigachat/api/integration).
-
-## Installation and Setup
-
-The GigaChat package can be installed via pip from PyPI:
-
-```bash
-pip install langchain-gigachat
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/gigachat).
-
-```python
-from langchain_community.llms import GigaChat
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/gigachat).
-
-```python
-from langchain_gigachat.chat_models import GigaChat
-```
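-
-A minimal sketch (the `credentials` and `verify_ssl_certs` parameters are assumptions based on GigaChat's token-based authorization flow; see the link above for how to obtain credentials):
-
-```python
-from langchain_gigachat.chat_models import GigaChat
-
-# The credentials value is a placeholder for your GigaChat authorization key.
-chat = GigaChat(credentials="YOUR_AUTH_KEY", verify_ssl_certs=False)
-print(chat.invoke("Hello!").content)
-```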
-
-## Embeddings
-
-See a [usage example](/docs/integrations/text_embedding/gigachat).
-
-```python
-from langchain_gigachat.embeddings import GigaChatEmbeddings
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/sap.mdx b/langchain_md_files/integrations/providers/sap.mdx
deleted file mode 100644
index 97cf2649b6e9c4ad42d6905650af79196fc86876..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/sap.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# SAP
-
->[SAP SE](https://www.sap.com/about/company.html) is a German multinational 
-> software company. It develops enterprise software to manage business operations and 
-> customer relations. The company is the world's leading 
-> `enterprise resource planning (ERP)` software vendor.
-
-## Installation and Setup
-
-We need to install the `hdbcli` python package.
-
-```bash
-pip install hdbcli
-```
-
-## Vectorstore
-
->[SAP HANA Cloud Vector Engine](https://www.sap.com/events/teched/news-guide/ai.html#article8) is 
-> a vector store fully integrated into the `SAP HANA Cloud` database.
-
-See a [usage example](/docs/integrations/vectorstores/sap_hanavector).
-
-```python
-from langchain_community.vectorstores.hanavector import HanaDB
-```
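-
-A minimal sketch of connecting and creating the vector store (the connection details are placeholders, and `FakeEmbeddings` stands in for a real embedding model):
-
-```python
-from hdbcli import dbapi
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores.hanavector import HanaDB
-
-# Connection details are placeholders for your SAP HANA Cloud instance.
-connection = dbapi.connect(address="<host>", port=443, user="<user>", password="<password>")
-db = HanaDB(connection=connection, embedding=FakeEmbeddings(size=100), table_name="DEMO_TABLE")
-db.add_texts(["SAP HANA Cloud has a built-in vector engine."])
-print(db.similarity_search("vector engine", k=1))
-```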
diff --git a/langchain_md_files/integrations/providers/scrapegraph.mdx b/langchain_md_files/integrations/providers/scrapegraph.mdx
deleted file mode 100644
index 93507ef3a88d8efddf0da4fab5b51f05af76fafc..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/scrapegraph.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
-# ScrapeGraph AI
-
->[ScrapeGraph AI](https://scrapegraphai.com) is a service that provides AI-powered web scraping capabilities.
->It offers tools for extracting structured data, converting webpages to markdown, and processing local HTML content
->using natural language prompts.
-
-## Installation and Setup
-
-Install the required packages:
-
-```bash
-pip install langchain-scrapegraph
-```
-
-Set up your API key:
-
-```bash
-export SGAI_API_KEY="your-scrapegraph-api-key"
-```
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/scrapegraph).
-
-There are four tools available:
-
-```python
-from langchain_scrapegraph.tools import (
-    SmartScraperTool,    # Extract structured data from websites
-    MarkdownifyTool,     # Convert webpages to markdown
-    LocalScraperTool,    # Process local HTML content
-    GetCreditsTool,      # Check remaining API credits
-)
-```
-
-Each tool serves a specific purpose:
-
-- `SmartScraperTool`: Extract structured data from websites given a URL, prompt and optional output schema
-- `MarkdownifyTool`: Convert any webpage to clean markdown format
-- `LocalScraperTool`: Extract structured data from a local HTML file given a prompt and optional output schema
-- `GetCreditsTool`: Check your remaining ScrapeGraph AI credits 
diff --git a/langchain_md_files/integrations/providers/searchapi.mdx b/langchain_md_files/integrations/providers/searchapi.mdx
deleted file mode 100644
index 1dfaded161009afc5ca95c8740735e47f526e637..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/searchapi.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
-# SearchApi
-
-This page covers how to use the [SearchApi](https://www.searchapi.io/) Google Search API within LangChain. SearchApi is a real-time SERP API for easy SERP scraping.
-
-## Setup
-
-- Go to [https://www.searchapi.io/](https://www.searchapi.io/) to sign up for a free account
-- Get the api key and set it as an environment variable (`SEARCHAPI_API_KEY`)
-
-## Wrappers
-
-### Utility
-
-There is a SearchApiAPIWrapper utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities import SearchApiAPIWrapper
-```
-
-You can use it as part of a Self Ask chain:
-
-```python
-from langchain_community.utilities import SearchApiAPIWrapper
-from langchain_openai import OpenAI
-from langchain.agents import initialize_agent, Tool
-from langchain.agents import AgentType
-
-import os
-
-os.environ["SEARCHAPI_API_KEY"] = ""
-os.environ['OPENAI_API_KEY'] = ""
-
-llm = OpenAI(temperature=0)
-search = SearchApiAPIWrapper()
-tools = [
-    Tool(
-        name="Intermediate Answer",
-        func=search.run,
-        description="useful for when you need to ask with search"
-    )
-]
-
-self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)
-self_ask_with_search.run("Who lived longer: Plato, Socrates, or Aristotle?")
-```
-
-#### Output
-
-```
-> Entering new AgentExecutor chain...
- Yes.
-Follow up: How old was Plato when he died?
-Intermediate answer: eighty
-Follow up: How old was Socrates when he died?
-Intermediate answer: | Socrates | 
-| -------- | 
-| Born | c. 470 BC Deme Alopece, Athens | 
-| Died | 399 BC (aged approximately 71) Athens | 
-| Cause of death | Execution by forced suicide by poisoning | 
-| Spouse(s) | Xanthippe, Myrto | 
-
-Follow up: How old was Aristotle when he died?
-Intermediate answer: 62 years
-So the final answer is: Plato
-
-> Finished chain.
-'Plato'
-```
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-
-```python
-from langchain.agents import load_tools
-tools = load_tools(["searchapi"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/searx.mdx b/langchain_md_files/integrations/providers/searx.mdx
deleted file mode 100644
index 687900e47d80aebd32d110360d25bff6d30013e3..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/searx.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
-# SearxNG Search API
-
-This page covers how to use the SearxNG search API within LangChain.
-It is broken into two parts: installation and setup, and then references to the specific SearxNG API wrapper.
-
-## Installation and Setup
-
-While it is possible to utilize the wrapper in conjunction with [public searx
-instances](https://searx.space/), these instances frequently do not permit API
-access (see note on output format below) and have limitations on the frequency
-of requests. It is recommended to opt for a self-hosted instance instead.
-
-### Self Hosted Instance:
-
-See [this page](https://searxng.github.io/searxng/admin/installation.html) for installation instructions.
-
-When you install SearxNG, the only active output format by default is the HTML format.
-You need to activate the `json` format to use the API. This can be done by adding the following line to the `settings.yml` file:
-```yaml
-search:
-    formats:
-        - html
-        - json
-```
-You can make sure that the API is working by issuing a curl request to the API endpoint:
-
-`curl -kLX GET --data-urlencode q='langchain' -d format=json http://localhost:8888`
-
-This should return a JSON object with the results.
-
-
-## Wrappers
-
-### Utility
-
-To use the wrapper, we need to pass the host of the SearxNG instance to it, either by:
-1. passing the named parameter `searx_host` when creating the instance, or
-2. exporting the environment variable `SEARXNG_HOST`.
-
-You can use the wrapper to get results from a SearxNG instance. 
-
-```python
-from langchain_community.utilities import SearxSearchWrapper
-s = SearxSearchWrapper(searx_host="http://localhost:8888")
-s.run("what is a large language model?")
-```
-
-### Tool
-
-You can also load this wrapper as a Tool (to use with an Agent).
-
-You can do this with:
-
-```python
-from langchain.agents import load_tools
-tools = load_tools(["searx-search"],
-                    searx_host="http://localhost:8888",
-                    engines=["github"])
-```
-
-Note that we could _optionally_ pass custom engines to use.
-
-If you want to obtain results with metadata as *json* you can use:
-```python
-tools = load_tools(["searx-search-results-json"],
-                    searx_host="http://localhost:8888",
-                    num_results=5)
-```
-
-#### Quickly creating tools
-
-This example showcases a quick way to create multiple tools from the same
-wrapper.
-
-```python
-from langchain_community.tools.searx_search.tool import SearxSearchResults
-
-wrapper = SearxSearchWrapper(searx_host="**")
-github_tool = SearxSearchResults(name="Github", wrapper=wrapper,
-                            kwargs = {
-                                "engines": ["github"],
-                                })
-
-arxiv_tool = SearxSearchResults(name="Arxiv", wrapper=wrapper,
-                            kwargs = {
-                                "engines": ["arxiv"]
-                                })
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/semadb.mdx b/langchain_md_files/integrations/providers/semadb.mdx
deleted file mode 100644
index 905ef96613244363d72334f1058c9049c10b8216..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/semadb.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# SemaDB
-
->[SemaDB](https://semafind.com/) is a no fuss vector similarity search engine. It provides a low-cost cloud hosted version to help you build AI applications with ease.
-
-With SemaDB Cloud, our hosted version, no fuss means no pod size calculations, no schema definitions, no partition settings, no parameter tuning, no search algorithm tuning, no complex installation, no complex API. It is integrated with [RapidAPI](https://rapidapi.com/semafind-semadb/api/semadb) providing transparent billing, automatic sharding and an interactive API playground.
-
-## Installation
-
-None required, get started directly with SemaDB Cloud at [RapidAPI](https://rapidapi.com/semafind-semadb/api/semadb).
-
-## Vector Store
-
-There is a basic wrapper around `SemaDB` collections allowing you to use it as a vectorstore.
-
-```python
-from langchain_community.vectorstores import SemaDB
-```
-
-You can follow a tutorial on how to use the wrapper in [this notebook](/docs/integrations/vectorstores/semadb).
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/serpapi.mdx b/langchain_md_files/integrations/providers/serpapi.mdx
deleted file mode 100644
index 31de2bb5ec5c2f955254e9b75c1ded5db2498bff..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/serpapi.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# SerpAPI
-
-This page covers how to use the SerpAPI search APIs within LangChain.
-It is broken into two parts: installation and setup, and then references to the specific SerpAPI wrapper.
-
-## Installation and Setup
-- Install requirements with `pip install google-search-results`
-- Get a SerpAPI API key and set it as an environment variable (`SERPAPI_API_KEY`)
-
-## Wrappers
-
-### Utility
-
-There exists a SerpAPI utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities import SerpAPIWrapper
-```
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/serpapi).
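-
-A quick sketch of calling the utility directly (assumes `SERPAPI_API_KEY` is set):
-
-```python
-from langchain_community.utilities import SerpAPIWrapper
-
-# Assumes SERPAPI_API_KEY is set in the environment.
-search = SerpAPIWrapper()
-print(search.run("What is the capital of France?"))
-```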
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-```python
-from langchain.agents import load_tools
-tools = load_tools(["serpapi"])
-```
-
-For more information on this, see [this page](/docs/how_to/tools_builtin)
diff --git a/langchain_md_files/integrations/providers/singlestoredb.mdx b/langchain_md_files/integrations/providers/singlestoredb.mdx
deleted file mode 100644
index 3c77a7dfca173a55da6930da8f481d8fe410c2cd..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/singlestoredb.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
-# SingleStoreDB
-
->[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. 
-
-## Installation and Setup
-
-There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB` constructor. 
-Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods.
-
-```bash
-pip install singlestoredb
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/singlestoredb).
-
-```python
-from langchain_community.vectorstores import SingleStoreDB
-```
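-
-A minimal sketch (the connection URL and table name are illustrative, and `FakeEmbeddings` stands in for a real embedding model):
-
-```python
-import os
-
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores import SingleStoreDB
-
-# The connection URL is illustrative; it can also be passed via the `host` parameter.
-os.environ["SINGLESTOREDB_URL"] = "root:pass@localhost:3306/db"
-
-store = SingleStoreDB.from_texts(
-    ["SingleStoreDB supports vector search."],
-    FakeEmbeddings(size=10),
-    table_name="demo",
-)
-print(store.similarity_search("vector search", k=1))
-```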
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/singlestoredb_chat_message_history).
-
-```python
-from langchain.memory import SingleStoreDBChatMessageHistory
-```
diff --git a/langchain_md_files/integrations/providers/sklearn.mdx b/langchain_md_files/integrations/providers/sklearn.mdx
deleted file mode 100644
index a2d9e0554d706aee1fc7e4c8e03890f3408f6ff3..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/sklearn.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
-# scikit-learn
-
->[scikit-learn](https://scikit-learn.org/stable/) is an open-source collection of machine learning algorithms, 
-> including an implementation of the [k nearest neighbors algorithm](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the ability to persist the vector store in JSON, BSON (binary JSON), or Apache Parquet format.
-
-## Installation and Setup
-
-- Install the Python package with `pip install scikit-learn`
-
-
-## Vector Store
-
-`SKLearnVectorStore` provides a simple wrapper around the nearest neighbor implementation in the
-scikit-learn package, allowing you to use it as a vectorstore.
-
-To import this vectorstore:
-
-```python
-from langchain_community.vectorstores import SKLearnVectorStore
-```
-
-For a more detailed walkthrough of the SKLearnVectorStore wrapper, see [this notebook](/docs/integrations/vectorstores/sklearn).
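-
-A minimal sketch (the persistence path is illustrative, and `FakeEmbeddings` stands in for a real embedding model):
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores import SKLearnVectorStore
-
-# persist_path and serializer are optional; the values here are illustrative.
-store = SKLearnVectorStore.from_texts(
-    ["scikit-learn provides a nearest neighbors implementation."],
-    embedding=FakeEmbeddings(size=100),
-    persist_path="./sklearn_vectorstore.json",
-    serializer="json",
-)
-print(store.similarity_search("nearest neighbors", k=1))
-```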
-
-
-## Retriever
-
-`Support vector machines (SVMs)` are supervised learning 
-methods used for classification, regression, and outlier detection.
-
-See a [usage example](/docs/integrations/retrievers/svm).
-
-```python
-from langchain_community.retrievers import SVMRetriever
-```
-
diff --git a/langchain_md_files/integrations/providers/slack.mdx b/langchain_md_files/integrations/providers/slack.mdx
deleted file mode 100644
index 9013e5b0cc2899cbb2650e23c6303f8faec3c7c6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/slack.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# Slack
-
->[Slack](https://slack.com/) is an instant messaging program.
- 
-## Installation and Setup
-
-There isn't any special setup for it.
-
-
-## Document loader
-
-See a [usage example](/docs/integrations/document_loaders/slack).
-
-```python
-from langchain_community.document_loaders import SlackDirectoryLoader
-```
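-
-A minimal sketch (the path points to a Slack workspace export archive and is illustrative):
-
-```python
-from langchain_community.document_loaders import SlackDirectoryLoader
-
-# Path to a Slack workspace export zip file (illustrative).
-loader = SlackDirectoryLoader("slack_export.zip")
-docs = loader.load()
-```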
-
-## Toolkit
-
-See a [usage example](/docs/integrations/tools/slack).
-
-```python
-from langchain_community.agent_toolkits import SlackToolkit
-```
-
-## Chat loader
-
-See a [usage example](/docs/integrations/chat_loaders/slack).
-
-```python
-from langchain_community.chat_loaders.slack import SlackChatLoader
-```
diff --git a/langchain_md_files/integrations/providers/snowflake.mdx b/langchain_md_files/integrations/providers/snowflake.mdx
deleted file mode 100644
index c42c71975880373acc63abb4aef7ec0bd5251a73..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/snowflake.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# Snowflake
-
-> [Snowflake](https://www.snowflake.com/) is a cloud-based data-warehousing platform 
-> that allows you to store and query large amounts of data.
-
-This page covers how to use the `Snowflake` ecosystem within `LangChain`.
-
-## Embedding models
-
-Snowflake offers its open-weight `arctic` line of embedding models for free
-on [Hugging Face](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5). The most recent model, snowflake-arctic-embed-m-v1.5, features [matryoshka embeddings](https://arxiv.org/abs/2205.13147), which allow for effective vector truncation. 
-You can use these models via the 
-[HuggingFaceEmbeddings](/docs/integrations/text_embedding/huggingfacehub) connector:
-
-```shell
-pip install langchain-huggingface sentence-transformers
-```
-
-```python
-from langchain_huggingface import HuggingFaceEmbeddings
-
-model = HuggingFaceEmbeddings(model_name="snowflake/arctic-embed-m-v1.5")
-```
-
-## Document loader
-
-You can use the [`SnowflakeLoader`](/docs/integrations/document_loaders/snowflake) 
-to load data from Snowflake:
-
-```python
-from langchain_community.document_loaders import SnowflakeLoader
-```
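-
-A minimal sketch (all connection values are placeholders for your Snowflake account, and the query is illustrative):
-
-```python
-from langchain_community.document_loaders import SnowflakeLoader
-
-# Connection parameters are placeholders; the query is illustrative.
-loader = SnowflakeLoader(
-    query="SELECT text, survey_id FROM my_table LIMIT 10;",
-    user="<user>",
-    password="<password>",
-    account="<account>",
-    warehouse="<warehouse>",
-    role="<role>",
-    database="<database>",
-    schema="<schema>",
-)
-docs = loader.load()
-```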
diff --git a/langchain_md_files/integrations/providers/spacy.mdx b/langchain_md_files/integrations/providers/spacy.mdx
deleted file mode 100644
index d893f12a3dbd65c934977b006da6dc4161406f73..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/spacy.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
-# spaCy
-
->[spaCy](https://spacy.io/) is an open-source software library for advanced natural language processing, written in the programming languages Python and Cython.
- 
-## Installation and Setup
-
-
-```bash
-pip install spacy
-```
-
-
-
-## Text Splitter
-
-See a [usage example](/docs/how_to/split_by_token/#spacy).
-
-```python
-from langchain_text_splitters import SpacyTextSplitter
-```
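-
-A minimal sketch (assumes the default `en_core_web_sm` pipeline has been downloaded with `python -m spacy download en_core_web_sm`):
-
-```python
-from langchain_text_splitters import SpacyTextSplitter
-
-# Splits on sentence boundaries detected by spaCy, then packs sentences into chunks.
-splitter = SpacyTextSplitter(chunk_size=1000)
-chunks = splitter.split_text("This is the first sentence. This is the second sentence.")
-print(chunks)
-```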
-
-## Text Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/spacy_embedding)
-
-```python
-from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/spark.mdx b/langchain_md_files/integrations/providers/spark.mdx
deleted file mode 100644
index a7721415af8db2e4d6863aea163a11ca8059bd82..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/spark.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
-# Spark
-
->[Apache Spark](https://spark.apache.org/) is a unified analytics engine for 
-> large-scale data processing. It provides high-level APIs in Scala, Java, 
-> Python, and R, and an optimized engine that supports general computation 
-> graphs for data analysis. It also supports a rich set of higher-level 
-> tools including `Spark SQL` for SQL and DataFrames, `pandas API on Spark` 
-> for pandas workloads, `MLlib` for machine learning, 
-> `GraphX` for graph processing, and `Structured Streaming` for stream processing.
-
-## Document loaders
-
-### PySpark
-
-It loads data from a `PySpark` DataFrame.
-
-See a [usage example](/docs/integrations/document_loaders/pyspark_dataframe).
-
-```python
-from langchain_community.document_loaders import PySparkDataFrameLoader
-```
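-
-A minimal sketch (the DataFrame and column name are illustrative):
-
-```python
-from pyspark.sql import SparkSession
-from langchain_community.document_loaders import PySparkDataFrameLoader
-
-spark = SparkSession.builder.getOrCreate()
-# The DataFrame and the page_content_column are illustrative.
-df = spark.createDataFrame([("LangChain on Spark",)], ["text"])
-loader = PySparkDataFrameLoader(spark, df, page_content_column="text")
-docs = loader.load()
-```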
-
-## Tools/Toolkits
-
-### Spark SQL toolkit
-
-Toolkit for interacting with `Spark SQL`.
-
-See a [usage example](/docs/integrations/tools/spark_sql).
-
-```python
-from langchain_community.agent_toolkits import SparkSQLToolkit, create_spark_sql_agent
-from langchain_community.utilities.spark_sql import SparkSQL
-```
-
-#### Spark SQL individual tools
-
-You can use individual tools from the Spark SQL Toolkit:
-- `InfoSparkSQLTool`: tool for getting metadata about Spark SQL tables
-- `ListSparkSQLTool`: tool for getting table names
-- `QueryCheckerTool`: tool that uses an LLM to check whether a query is correct
-- `QuerySparkSQLTool`: tool for querying Spark SQL
-
-```python
-from langchain_community.tools.spark_sql.tool import InfoSparkSQLTool
-from langchain_community.tools.spark_sql.tool import ListSparkSQLTool
-from langchain_community.tools.spark_sql.tool import QueryCheckerTool
-from langchain_community.tools.spark_sql.tool import QuerySparkSQLTool
-```
diff --git a/langchain_md_files/integrations/providers/sparkllm.mdx b/langchain_md_files/integrations/providers/sparkllm.mdx
deleted file mode 100644
index c4a661f1e3321e4fbe95a1e0106c48578c7958a5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/sparkllm.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# SparkLLM
-
->[SparkLLM](https://xinghuo.xfyun.cn/spark) is a large-scale cognitive model independently developed by iFLYTEK.
-It has cross-domain knowledge and language understanding ability by learning a large amount of texts, codes and images.
-It can understand and perform tasks based on natural dialogue.
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/sparkllm).
-
-```python
-from langchain_community.chat_models import ChatSparkLLM
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/sparkllm).
-
-```python
-from langchain_community.llms import SparkLLM
-```
-
-## Embedding models
-
-See a [usage example](/docs/integrations/text_embedding/sparkllm)
-
-```python
-from langchain_community.embeddings import SparkLLMTextEmbeddings
-```
diff --git a/langchain_md_files/integrations/providers/spreedly.mdx b/langchain_md_files/integrations/providers/spreedly.mdx
deleted file mode 100644
index 16930aa06e91078579abb2ecb3356157e1381a9c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/spreedly.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Spreedly
-
->[Spreedly](https://docs.spreedly.com/) is a service that allows you to securely store credit cards and use them to transact against any number of payment gateways and third party APIs. It does this by simultaneously providing a card tokenization/vault service as well as a gateway and receiver integration service. Payment methods tokenized by Spreedly are stored at `Spreedly`, allowing you to independently store a card and then pass that card to different end points based on your business requirements.
- 
-## Installation and Setup
-
-See [setup instructions](/docs/integrations/document_loaders/spreedly).
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/spreedly).
-
-```python
-from langchain_community.document_loaders import SpreedlyLoader
-```
diff --git a/langchain_md_files/integrations/providers/sqlite.mdx b/langchain_md_files/integrations/providers/sqlite.mdx
deleted file mode 100644
index 3bba662f0d888930e2c4c04072ed531f23f57c37..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/sqlite.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# SQLite
-
->[SQLite](https://en.wikipedia.org/wiki/SQLite) is a database engine written in the 
-> C programming language. It is not a standalone app; rather, it is a library that 
-> software developers embed in their apps. As such, it belongs to the family of 
-> embedded databases. It is the most widely deployed database engine, as it is 
-> used by several of the top web browsers, operating systems, mobile phones, and other embedded systems.
-
-## Installation and Setup
-
-We need to install the `SQLAlchemy` python package.
-
-```bash
-pip install SQLAlchemy
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/sqlitevec).
-
-```python
-from langchain_community.vectorstores import SQLiteVec
-from langchain_community.vectorstores import SQLiteVSS # legacy
-```
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/sqlite).
-
-```python
-from langchain_community.chat_message_histories import SQLChatMessageHistory
-```
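-
-As a rough sketch, the message history can point at a local SQLite file through a SQLAlchemy connection string; the session id and file name below are placeholders, and the exact parameter name may differ between versions:
-
-```python
-from langchain_community.chat_message_histories import SQLChatMessageHistory
-
-# Store chat messages in a local SQLite database file (placeholder names)
-history = SQLChatMessageHistory(
-    session_id="example-session",
-    connection_string="sqlite:///memory.db",
-)
-history.add_user_message("Hello!")
-history.add_ai_message("Hi, how can I help?")
-```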
diff --git a/langchain_md_files/integrations/providers/stackexchange.mdx b/langchain_md_files/integrations/providers/stackexchange.mdx
deleted file mode 100644
index b3b00932f943d21916377bb6912e393fe01664cc..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/stackexchange.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Stack Exchange
-
->[Stack Exchange](https://en.wikipedia.org/wiki/Stack_Exchange) is a network of 
-> question-and-answer (Q&A) websites on topics in diverse fields, each site covering 
-> a specific topic, where questions, answers, and users are subject to a reputation award process.
-
-This page covers how to use the `Stack Exchange API` within LangChain.
-
-## Installation and Setup
-- Install requirements with 
-```bash
-pip install stackapi
-```
-
-## Wrappers
-
-### Utility
-
-There exists a StackExchangeAPIWrapper utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities import StackExchangeAPIWrapper
-```
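-
-As a rough sketch, a query can be run directly against the wrapper; the query string below is only an illustration:
-
-```python
-from langchain_community.utilities import StackExchangeAPIWrapper
-
-stackexchange = StackExchangeAPIWrapper()
-# Search Stack Overflow (the default site) for matching questions and answers
-print(stackexchange.run("zsh: command not found: python"))
-```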
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/stackexchange).
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-```python
-from langchain.agents import load_tools
-tools = load_tools(["stackexchange"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/starrocks.mdx b/langchain_md_files/integrations/providers/starrocks.mdx
deleted file mode 100644
index bc5c9983c9e4a9e8bf90c2f7b683a7417b33c98f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/starrocks.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# StarRocks
-
->[StarRocks](https://www.starrocks.io/) is a High-Performance Analytical Database.
-> `StarRocks` is a next-gen, sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics, and ad-hoc queries.
-
->Usually `StarRocks` is categorized into OLAP, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it can also be used as a fast vector database.
-
-## Installation and Setup
-
-
-```bash
-pip install pymysql
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/starrocks).
-
-```python
-from langchain_community.vectorstores import StarRocks
-```
diff --git a/langchain_md_files/integrations/providers/stochasticai.mdx b/langchain_md_files/integrations/providers/stochasticai.mdx
deleted file mode 100644
index bd0b5484bb221b6d44fae70c28217fb4cc0df871..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/stochasticai.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# StochasticAI
-
-This page covers how to use the StochasticAI ecosystem within LangChain.
-It is broken into two parts: installation and setup, and then references to specific StochasticAI wrappers.
-
-## Installation and Setup
-- Install with `pip install stochasticx`
-- Get a StochasticAI API key and set it as an environment variable (`STOCHASTICAI_API_KEY`)
-
-## Wrappers
-
-### LLM
-
-There exists a StochasticAI LLM wrapper, which you can access with 
-```python
-from langchain_community.llms import StochasticAI
-```
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/streamlit.mdx b/langchain_md_files/integrations/providers/streamlit.mdx
deleted file mode 100644
index d90f8f52b310d769f5210c91bfb71f8ba45af0a5..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/streamlit.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
-# Streamlit
-
->[Streamlit](https://streamlit.io/) is a faster way to build and share data apps.
->`Streamlit` turns data scripts into shareable web apps in minutes. All in pure Python. No front‑end experience required.
->See more examples at [streamlit.io/generative-ai](https://streamlit.io/generative-ai).
-
-## Installation and Setup
-
-We need to install the `streamlit` Python package:
-
-```bash
-pip install streamlit
-```
-
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/streamlit_chat_message_history).
-
-```python
-from langchain_community.chat_message_histories import StreamlitChatMessageHistory
-```
-
-## Callbacks
-
-See a [usage example](/docs/integrations/callbacks/streamlit).
-
-```python
-from langchain_community.callbacks import StreamlitCallbackHandler
-```
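-
-As a rough sketch, the handler renders an agent's intermediate steps into a Streamlit container; the agent itself is assumed to exist already and is not constructed here:
-
-```python
-import streamlit as st
-from langchain_community.callbacks import StreamlitCallbackHandler
-
-# Render intermediate steps of a chain or agent into the current container
-st_callback = StreamlitCallbackHandler(st.container())
-
-# `agent_executor` is an assumed, already-constructed LangChain agent:
-# response = agent_executor.invoke({"input": prompt}, {"callbacks": [st_callback]})
-```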
diff --git a/langchain_md_files/integrations/providers/stripe.mdx b/langchain_md_files/integrations/providers/stripe.mdx
deleted file mode 100644
index a7e80d97a7fe0eb7043cbbe8bc15408451a5d53b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/stripe.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# Stripe
-
->[Stripe](https://stripe.com/en-ca) is an Irish-American financial services and software as a service (SaaS) company. It offers payment-processing software and application programming interfaces for e-commerce websites and mobile applications.
-
-
-## Installation and Setup
-
-See [setup instructions](/docs/integrations/document_loaders/stripe).
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/stripe).
-
-```python
-from langchain_community.document_loaders import StripeLoader
-```
diff --git a/langchain_md_files/integrations/providers/supabase.mdx b/langchain_md_files/integrations/providers/supabase.mdx
deleted file mode 100644
index 7a574800d063a36e429ce1eb9607e2e452ac5e54..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/supabase.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
-# Supabase (Postgres)
-
->[Supabase](https://supabase.com/docs) is an open-source `Firebase` alternative. 
-> `Supabase` is built on top of `PostgreSQL`, which offers strong `SQL` 
-> querying capabilities and enables a simple interface with already-existing tools and frameworks.
-
->[PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL) also known as `Postgres`,
-> is a free and open-source relational database management system (RDBMS) 
-> emphasizing extensibility and `SQL` compliance.
-
-## Installation and Setup
-
-We need to install the `supabase` Python package.
-
-```bash
-pip install supabase
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/supabase).
-
-```python
-from langchain_community.vectorstores import SupabaseVectorStore
-```
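-
-As a rough sketch, the store is wired to a Supabase project through the Supabase client; the environment variables, table name, and query name below are placeholders for your own setup:
-
-```python
-import os
-
-from langchain_community.vectorstores import SupabaseVectorStore
-from langchain_openai import OpenAIEmbeddings
-from supabase import create_client
-
-# Placeholder project URL and service key
-supabase = create_client(os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"])
-
-vector_store = SupabaseVectorStore(
-    client=supabase,
-    embedding=OpenAIEmbeddings(),
-    table_name="documents",
-    query_name="match_documents",
-)
-```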
-
diff --git a/langchain_md_files/integrations/providers/symblai_nebula.mdx b/langchain_md_files/integrations/providers/symblai_nebula.mdx
deleted file mode 100644
index a302bd81b55a1b65942efc0a626c34f09017d29f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/symblai_nebula.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Nebula
-
-This page covers how to use [Nebula](https://symbl.ai/nebula), [Symbl.ai](https://symbl.ai/)'s LLM, within LangChain.
-It is broken into two parts: installation and setup, and then references to specific Nebula wrappers.
-
-## Installation and Setup
-
-- Get a [Nebula API Key](https://info.symbl.ai/Nebula_Private_Beta.html) and set it as the environment variable `NEBULA_API_KEY`
-- Please see the [Nebula documentation](https://docs.symbl.ai/docs/nebula-llm) for more details.
-
-### LLM
-
-There exists a Nebula LLM wrapper, which you can access with
-```python
-from langchain_community.llms import Nebula
-llm = Nebula()
-```
diff --git a/langchain_md_files/integrations/providers/tableau.mdx b/langchain_md_files/integrations/providers/tableau.mdx
deleted file mode 100644
index fc872a5fa12f4adc04b0dbd7c93de4d7df7a3e7a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tableau.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Tableau
-
-[Tableau](https://www.tableau.com/) is an analytics platform that enables anyone to
-see and understand data. 
-
-
-## Installation and Setup
-
-```bash
-pip install langchain-tableau
-```
-
-## Tools
-
-See details on available tools [here](/docs/integrations/tools/tableau).
diff --git a/langchain_md_files/integrations/providers/taiga.mdx b/langchain_md_files/integrations/providers/taiga.mdx
deleted file mode 100644
index 5d7c9876110a8c4ed90f1ceeacb0eec0ea44fc63..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/taiga.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
-# Taiga
-
-> [Taiga](https://docs.taiga.io/) is an open-source project management platform designed for agile teams, offering features like Kanban, Scrum, and issue tracking.
-
-## Installation and Setup
-
-Install the `langchain-taiga` package:
-
-```bash
-pip install langchain-taiga
-```
-
-You must provide login credentials via environment variables so the tools can authenticate.
-
-```bash
-export TAIGA_URL="https://taiga.xyz.org/"
-export TAIGA_API_URL="https://taiga.xyz.org/"
-export TAIGA_USERNAME="username"
-export TAIGA_PASSWORD="pw"
-export OPENAI_API_KEY="OPENAI_API_KEY"
-```
-
-
----
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/taiga)
-
----
-
-## Toolkit
-
-`TaigaToolkit` groups multiple Taiga-related tools into a single interface.
-
-```python
-from langchain_taiga.toolkits import TaigaToolkit
-
-toolkit = TaigaToolkit()
-tools = toolkit.get_tools()
-
-```
-
----
-
-## Future Integrations
-
-
-Check the [Taiga Developer Docs](https://docs.taiga.io/) for more information, and watch for updates or advanced usage examples in the [langchain_taiga GitHub repo](https://github.com/Shikenso-Analytics/langchain-taiga).
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/tair.mdx b/langchain_md_files/integrations/providers/tair.mdx
deleted file mode 100644
index d84d7378033851c45c16763291f75841c4524726..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tair.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# Tair
-
->[Alibaba Cloud Tair](https://www.alibabacloud.com/help/en/tair/latest/what-is-tair) is a cloud native in-memory database service 
-> developed by `Alibaba Cloud`. It provides rich data models and enterprise-grade capabilities to 
-> support your real-time online scenarios while maintaining full compatibility with open-source `Redis`. 
-> `Tair` also introduces persistent memory-optimized instances that are based on 
-> new non-volatile memory (NVM) storage medium.
-
-## Installation and Setup
-
-Install Tair Python SDK:
-
-```bash
-pip install tair
-```
-
-## Vector Store
-
-```python
-from langchain_community.vectorstores import Tair
-```
-
-See a [usage example](/docs/integrations/vectorstores/tair).
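-
-As a rough sketch, documents can be indexed into a running Tair instance through a connection URL; the URL is a placeholder and `FakeEmbeddings` is used only for illustration:
-
-```python
-from langchain_community.embeddings import FakeEmbeddings
-from langchain_community.vectorstores import Tair
-
-# Placeholder connection URL for a Tair (Redis-compatible) instance
-TAIR_URL = "redis://localhost:6379"
-
-vector_store = Tair.from_texts(
-    texts=["to be or not to be", "that is the question"],
-    embedding=FakeEmbeddings(size=128),
-    tair_url=TAIR_URL,
-)
-docs = vector_store.similarity_search("question", k=1)
-```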
diff --git a/langchain_md_files/integrations/providers/telegram.mdx b/langchain_md_files/integrations/providers/telegram.mdx
deleted file mode 100644
index 124cbd509a7c292938994282bc163563417949c8..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/telegram.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Telegram
-
->[Telegram Messenger](https://web.telegram.org/a/) is a globally accessible freemium, cross-platform, encrypted, cloud-based and centralized instant messaging service. The application also provides optional end-to-end encrypted chats and video calling, VoIP, file sharing and several other features.
-
-
-## Installation and Setup
-
-See [setup instructions](/docs/integrations/document_loaders/telegram).
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/telegram).
-
-```python
-from langchain_community.document_loaders import TelegramChatFileLoader
-from langchain_community.document_loaders import TelegramChatApiLoader
-```
-
-## Chat loader
-
-See a [usage example](/docs/integrations/chat_loaders/telegram).
-
-```python
-from langchain_community.chat_loaders.telegram import TelegramChatLoader
-```
diff --git a/langchain_md_files/integrations/providers/tencent.mdx b/langchain_md_files/integrations/providers/tencent.mdx
deleted file mode 100644
index 9efe6deed7dbbb32c63e49a0e9ae25849d1b502d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tencent.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
-# Tencent
-
->[Tencent Holdings Ltd. (Wikipedia)](https://en.wikipedia.org/wiki/Tencent) (Chinese: 腾讯; pinyin: Téngxùn) 
-> is a Chinese multinational technology conglomerate and holding company headquartered 
-> in Shenzhen. `Tencent` is one of the highest grossing multimedia companies in the 
-> world based on revenue. It is also the world's largest company in the video game industry
-> based on its equity investments.
-
-
-## Chat model 
-
->[Tencent's hybrid model API](https://cloud.tencent.com/document/product/1729) (`Hunyuan API`) 
-> implements dialogue communication, content generation, 
-> analysis and understanding, and can be widely used in various scenarios such as intelligent 
-> customer service, intelligent marketing, role playing, advertising, copyrighting, product description,
-> script creation, resume generation, article writing, code generation, data analysis, and content
-> analysis.
-
-
-For more information, see [this notebook](/docs/integrations/chat/tencent_hunyuan)
-
-```python
-from langchain_community.chat_models import ChatHunyuan
-```
-
-
-## Document Loaders
-
-### Tencent COS
-
->[Tencent Cloud Object Storage (COS)](https://www.tencentcloud.com/products/cos) is a distributed 
-> storage service that enables you to store any amount of data from anywhere via HTTP/HTTPS protocols. 
-> `COS` has no restrictions on data structure or format. It also has no bucket size limit and 
-> partition management, making it suitable for virtually any use case, such as data delivery, 
-> data processing, and data lakes. COS provides a web-based console, multi-language SDKs and APIs, 
-> command line tool, and graphical tools. It works well with Amazon S3 APIs, allowing you to quickly 
-> access community tools and plugins.
-
-Install the Python SDK:
-
-```bash
-pip install cos-python-sdk-v5
-```
-
-#### Tencent COS Directory
-
-For more information, see [this notebook](/docs/integrations/document_loaders/tencent_cos_directory)
-
-```python
-from langchain_community.document_loaders import TencentCOSDirectoryLoader
-from qcloud_cos import CosConfig
-```
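-
-As a rough sketch, the loader is configured with a COS client config and then loads every object under a prefix; the region, credentials, bucket, and prefix are placeholders:
-
-```python
-from langchain_community.document_loaders import TencentCOSDirectoryLoader
-from qcloud_cos import CosConfig
-
-# Placeholder credentials and bucket details
-conf = CosConfig(
-    Region="ap-guangzhou",
-    SecretId="your-secret-id",
-    SecretKey="your-secret-key",
-)
-loader = TencentCOSDirectoryLoader(conf=conf, bucket="your-bucket", prefix="docs/")
-documents = loader.load()
-```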
-
-#### Tencent COS File
-
-For more information, see [this notebook](/docs/integrations/document_loaders/tencent_cos_file)
-
-```python
-from langchain_community.document_loaders import TencentCOSFileLoader
-from qcloud_cos import CosConfig
-```
-
-## Vector Store
-
-### Tencent VectorDB
-
->[Tencent Cloud VectorDB](https://www.tencentcloud.com/products/vdb) is a fully managed, 
-> self-developed enterprise-level distributed database service
->dedicated to storing, retrieving, and analyzing multidimensional vector data. The database supports a variety of index
->types and similarity calculation methods, and a single index supports 1 billion vectors, millions of QPS, and
->millisecond query latency. `Tencent Cloud Vector Database` can not only provide an external knowledge base for large
->models and improve the accuracy of large models' answers, but also be widely used in AI fields such as
->recommendation systems, NLP services, computer vision, and intelligent customer service.
-
-Install the Python SDK:
-
-```bash
-pip install tcvectordb
-```
-
-For more information, see [this notebook](/docs/integrations/vectorstores/tencentvectordb)
-
-```python
-from langchain_community.vectorstores import TencentVectorDB
-```
-
-## Chat loader
-
-### WeChat
-
->[WeChat](https://www.wechat.com/) or `Weixin` in Chinese is a Chinese 
-> instant messaging, social media, and mobile payment app developed by `Tencent`.
-
-See a [usage example](/docs/integrations/chat_loaders/wechat).
-
diff --git a/langchain_md_files/integrations/providers/tensorflow_datasets.mdx b/langchain_md_files/integrations/providers/tensorflow_datasets.mdx
deleted file mode 100644
index b3cc150977b41436c29aa521f9c247abaa7ffe8a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tensorflow_datasets.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# TensorFlow Datasets
-
->[TensorFlow Datasets](https://www.tensorflow.org/datasets) is a collection of datasets ready to use, 
-> with TensorFlow or other Python ML frameworks, such as Jax. All datasets are exposed 
-> as [tf.data.Datasets](https://www.tensorflow.org/api_docs/python/tf/data/Dataset), 
-> enabling easy-to-use and high-performance input pipelines. To get started see 
-> the [guide](https://www.tensorflow.org/datasets/overview) and 
-> the [list of datasets](https://www.tensorflow.org/datasets/catalog/overview#all_datasets).
-
-
-
-## Installation and Setup
-
-You need to install the `tensorflow` and `tensorflow-datasets` Python packages.
-
-```bash
-pip install tensorflow
-```
-
-```bash
-pip install tensorflow-datasets
-```
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/tensorflow_datasets).
-
-```python
-from langchain_community.document_loaders import TensorflowDatasetLoader
-```
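-
-As a rough sketch, the loader takes a dataset name, a split, and a function that maps each sample to a `Document`; the `mlqa/en` dataset and its field names are used here only as an illustration:
-
-```python
-from langchain_community.document_loaders import TensorflowDatasetLoader
-from langchain_core.documents import Document
-
-def mlqa_sample_to_document(example) -> Document:
-    # Map one TFDS sample to a LangChain Document (field names are illustrative)
-    return Document(
-        page_content=example["context"].numpy().decode("utf-8"),
-        metadata={"question": example["question"].numpy().decode("utf-8")},
-    )
-
-loader = TensorflowDatasetLoader(
-    dataset_name="mlqa/en",
-    split_name="test",
-    load_max_docs=10,
-    sample_to_document_function=mlqa_sample_to_document,
-)
-docs = loader.load()
-```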
diff --git a/langchain_md_files/integrations/providers/tidb.mdx b/langchain_md_files/integrations/providers/tidb.mdx
deleted file mode 100644
index 401b4300c48f7ed94ac1b043f71dbc6abee04251..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tidb.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# TiDB
-
-> [TiDB Cloud](https://www.pingcap.com/tidb-serverless) is a comprehensive Database-as-a-Service (DBaaS) solution
-> that provides dedicated and serverless options. `TiDB Serverless` is now integrating 
-> a built-in vector search into the MySQL landscape. With this enhancement, you can seamlessly 
-> develop AI applications using `TiDB Serverless` without the need for a new database or additional 
-> technical stacks. Create a free TiDB Serverless cluster and start using the vector search feature at https://pingcap.com/ai.
-
-
-## Installation and Setup
-
-You have to get the connection details for the TiDB database.
-Visit [TiDB Cloud](https://tidbcloud.com/) to get the connection details.
-
-## Document loader
-
-```python
-from langchain_community.document_loaders import TiDBLoader
-```
-
-Please refer to the details [here](/docs/integrations/document_loaders/tidb).
-
-## Vector store
-
-```python
-from langchain_community.vectorstores import TiDBVectorStore
-```
-Please refer to the details [here](/docs/integrations/vectorstores/tidb_vector).
-
-
-## Memory
-
-```python
-from langchain_community.chat_message_histories import TiDBChatMessageHistory
-```
-
-Please refer to the details [here](/docs/integrations/memory/tidb_chat_message_history).
diff --git a/langchain_md_files/integrations/providers/tigergraph.mdx b/langchain_md_files/integrations/providers/tigergraph.mdx
deleted file mode 100644
index 95a62635c83a3464fe34bb68f12b8e38ca74daf1..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tigergraph.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# TigerGraph
-
->[TigerGraph](https://www.tigergraph.com/tigergraph-db/) is a natively distributed and high-performance graph database.
-> The storage of data in a graph format of vertices and edges leads to rich relationships, 
-> ideal for grounding LLM responses.
-
-## Installation and Setup
-
-Follow instructions [how to connect to the `TigerGraph` database](https://docs.tigergraph.com/pytigergraph/current/getting-started/connection).
-
-Install the Python SDK:
-
-```bash
-pip install pyTigerGraph
-```
-
-## Graph store
-
-### TigerGraph
-
-See a [usage example](/docs/integrations/graphs/tigergraph).
-
-```python
-from langchain_community.graphs import TigerGraph
-```
diff --git a/langchain_md_files/integrations/providers/tigris.mdx b/langchain_md_files/integrations/providers/tigris.mdx
deleted file mode 100644
index 7852b6453ccb62c1b8819c50679b9e5a47d0907f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tigris.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Tigris
-
-> [Tigris](https://tigrisdata.com) is an open-source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications.
-> `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead.
-
-## Installation and Setup
-
-
-```bash
-pip install tigrisdb openapi-schema-pydantic
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/tigris).
-
-```python
-from langchain_community.vectorstores import Tigris
-```
diff --git a/langchain_md_files/integrations/providers/tomarkdown.mdx b/langchain_md_files/integrations/providers/tomarkdown.mdx
deleted file mode 100644
index 08787f943967b752df0ca39786548b50d9c6b8d7..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/tomarkdown.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-# 2Markdown
-
->[2markdown](https://2markdown.com/) is a service that transforms website content into structured markdown files.
-
-
-## Installation and Setup
-
-We need an API key. See [instructions on how to get it](https://2markdown.com/login).
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/tomarkdown).
-
-```python
-from langchain_community.document_loaders import ToMarkdownLoader
-```
diff --git a/langchain_md_files/integrations/providers/transwarp.mdx b/langchain_md_files/integrations/providers/transwarp.mdx
deleted file mode 100644
index 4406f885babd00a167e941ae679d97d030d8eb31..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/transwarp.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-# Transwarp
-
->[Transwarp](https://www.transwarp.cn/en/introduction) aims to build 
-> enterprise-level big data and AI infrastructure software, 
-> to shape the future of the data world. It provides enterprises with 
-> infrastructure software and services around the whole data lifecycle, 
-> including integration, storage, governance, modeling, analysis, 
-> mining and circulation. 
-> 
-> `Transwarp` focuses on technology research and 
-> development and has accumulated core technologies in these aspects: 
-> distributed computing, SQL compilations, database technology, 
-> unification for multi-model data management, container-based cloud computing, 
-> and big data analytics and intelligence.
-
-## Installation
-
-You have to install several python packages:
-
-```bash
-pip install -U tiktoken hippo-api
-```
-
-and get the connection configuration.
-
-## Vector stores
-
-### Hippo
-
-See [a usage example and installation instructions](/docs/integrations/vectorstores/hippo).
-
-```python
-from langchain_community.vectorstores.hippo import Hippo
-```
diff --git a/langchain_md_files/integrations/providers/trello.mdx b/langchain_md_files/integrations/providers/trello.mdx
deleted file mode 100644
index 0b897ae66021d1a89bd11ff44ffdb813c3a25f24..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/trello.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Trello
-
->[Trello](https://www.atlassian.com/software/trello) is a web-based project management and collaboration tool that allows individuals and teams to organize and track their tasks and projects. It provides a visual interface known as a "board" where users can create lists and cards to represent their tasks and activities.
->The TrelloLoader allows us to load cards from a `Trello` board.
-
-
-## Installation and Setup
-
-```bash
-pip install py-trello beautifulsoup4
-```
-
-See [setup instructions](/docs/integrations/document_loaders/trello).
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/trello).
-
-```python
-from langchain_community.document_loaders import TrelloLoader
-```
diff --git a/langchain_md_files/integrations/providers/trubrics.mdx b/langchain_md_files/integrations/providers/trubrics.mdx
deleted file mode 100644
index 4681b34bff4dd5758d82baac340c404d6cd47f31..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/trubrics.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# Trubrics
-
-
->[Trubrics](https://trubrics.com) is an LLM user analytics platform that lets you collect, analyse and manage user
-prompts & feedback on AI models.
->
->Check out [Trubrics repo](https://github.com/trubrics/trubrics-sdk) for more information on `Trubrics`.
-
-## Installation and Setup
-
-We need to install the `trubrics` Python package:
-
-```bash
-pip install trubrics
-```
-
-
-## Callbacks
-
-See a [usage example](/docs/integrations/callbacks/trubrics).
-
-```python
-from langchain.callbacks import TrubricsCallbackHandler
-```
diff --git a/langchain_md_files/integrations/providers/trulens.mdx b/langchain_md_files/integrations/providers/trulens.mdx
deleted file mode 100644
index 327a6de372084f5f800c78a82a283365f48dd5c4..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/trulens.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
-# TruLens
-
->[TruLens](https://trulens.org) is an [open-source](https://github.com/truera/trulens) package that provides instrumentation and evaluation tools for large language model (LLM) based applications.
-
-This page covers how to use [TruLens](https://trulens.org) to evaluate and track LLM apps built on langchain.
-
-
-## Installation and Setup
-
-Install the `trulens-eval` python package.
-
-```bash
-pip install trulens-eval
-```
-
-## Quickstart
-
-See the integration details in the [TruLens documentation](https://www.trulens.org/trulens_eval/getting_started/quickstarts/langchain_quickstart/).
-
-### Tracking
-
-Once you've created your LLM chain, you can use TruLens for evaluation and tracking. 
-TruLens has a number of [out-of-the-box Feedback Functions](https://www.trulens.org/trulens_eval/evaluation/feedback_functions/), 
-and is also an extensible framework for LLM evaluation.
-
-Create the feedback functions:
-
-```python
-from trulens_eval.feedback import Feedback, Huggingface, OpenAI
-
-# Initialize HuggingFace-based feedback function collection class:
-hugs = Huggingface()
-openai = OpenAI()
-
-# Define a language match feedback function using HuggingFace.
-lang_match = Feedback(hugs.language_match).on_input_output()
-# By default this will check language match on the main app input and main app
-# output.
-
-# Question/answer relevance between overall question and answer.
-qa_relevance = Feedback(openai.relevance).on_input_output()
-# By default this will evaluate feedback on main app input and main app output.
-
-# Toxicity of input
-toxicity = Feedback(openai.toxicity).on_input()
-```
-
-### Chains
-
-After you've set up Feedback Function(s) for evaluating your LLM, you can wrap your application with 
-TruChain to get detailed tracing, logging and evaluation of your LLM app.
-
-Note: The code for the `chain` creation is in 
-the [TruLens documentation](https://www.trulens.org/trulens_eval/getting_started/quickstarts/langchain_quickstart/).
-
-```python
-from trulens_eval import TruChain
-
-# wrap your chain with TruChain
-truchain = TruChain(
-    chain,
-    app_id='Chain1_ChatApplication',
-    feedbacks=[lang_match, qa_relevance, toxicity]
-)
-# Note: any `feedbacks` specified here will be evaluated and logged whenever the chain is used.
-truchain("que hora es?")
-```
-
-### Evaluation
-
-Now you can explore your LLM-based application!
-
-Doing so will help you understand how your LLM application is performing at a glance. As you iterate new versions of your LLM application, you can compare their performance across all of the different quality metrics you've set up. You'll also be able to view evaluations at a record level, and explore the chain metadata for each record.
-
-```python
-from trulens_eval import Tru
-
-tru = Tru()
-tru.run_dashboard() # open a Streamlit app to explore
-```
-
-For more information on TruLens, visit [trulens.org](https://www.trulens.org/)
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/twitter.mdx b/langchain_md_files/integrations/providers/twitter.mdx
deleted file mode 100644
index d2ba4fecb538815a267dd922275f36a35da6fc1d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/twitter.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# Twitter
-
->[Twitter](https://twitter.com/) is an online social media and social networking service.
-
-
-## Installation and Setup
-
-```bash
-pip install tweepy
-```
-
-We must initialize the loader with the `Twitter API` token and the Twitter `username` to load tweets from.
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/twitter).
-
-```python
-from langchain_community.document_loaders import TwitterTweetLoader
-```
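-
-As a rough sketch, the loader can be constructed from a bearer token and a list of usernames; both values below are placeholders:
-
-```python
-from langchain_community.document_loaders import TwitterTweetLoader
-
-# Placeholder bearer token and username
-loader = TwitterTweetLoader.from_bearer_token(
-    oauth2_bearer_token="YOUR_BEARER_TOKEN",
-    twitter_users=["some_username"],
-    number_tweets=10,
-)
-documents = loader.load()
-```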
-
-## Chat loader
-
-See a [usage example](/docs/integrations/chat_loaders/twitter).
diff --git a/langchain_md_files/integrations/providers/typesense.mdx b/langchain_md_files/integrations/providers/typesense.mdx
deleted file mode 100644
index 5bb2b3ca0e41cdd4bf47763deeaca8ec583ff253..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/typesense.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Typesense
-
-> [Typesense](https://typesense.org) is an open-source, in-memory search engine that you can either 
-> [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run 
-> on [Typesense Cloud](https://cloud.typesense.org/).
-> `Typesense` focuses on performance by storing the entire index in RAM (with a backup on disk) and also 
-> focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.
-
-## Installation and Setup
-
-
-```bash
-pip install typesense openapi-schema-pydantic
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/typesense).
-
-```python
-from langchain_community.vectorstores import Typesense
-```
diff --git a/langchain_md_files/integrations/providers/unstructured.mdx b/langchain_md_files/integrations/providers/unstructured.mdx
deleted file mode 100644
index 8425ff1faa226da14b130216bb42c1c6c6be2446..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/unstructured.mdx
+++ /dev/null
@@ -1,234 +0,0 @@
-# Unstructured
-
->The `unstructured` package from
->[Unstructured.IO](https://www.unstructured.io/) extracts clean text from raw source documents like
->PDFs and Word documents.
-
-This page covers how to use the [`unstructured`](https://github.com/Unstructured-IO/unstructured)
-ecosystem within LangChain.
-
-## Installation and Setup
-
-If you are using a loader that runs locally, use the following steps to get `unstructured` and its
-dependencies running.
-
-- For the smallest installation footprint and to take advantage of features not available in the
-  open-source `unstructured` package, install the Python SDK with `pip install unstructured-client`
-  along with `pip install langchain-unstructured` to use the `UnstructuredLoader` and partition
-  remotely against the Unstructured API. This loader lives
-  in a LangChain partner repo instead of the `langchain-community` repo, and you will need an
-  `api_key`; you can generate a free key [here](https://unstructured.io/api-key/).
-    - Unstructured's documentation for the SDK can be found here:
-      https://docs.unstructured.io/api-reference/api-services/sdk
-
-- To run everything locally, install the open-source python package with `pip install unstructured`
-  along with `pip install langchain-community` and use the same `UnstructuredLoader` as mentioned above.
-    - You can install document specific dependencies with extras, e.g. `pip install "unstructured[docx]"`. Learn more about extras [here](https://docs.unstructured.io/open-source/installation/full-installation).
-    - To install the dependencies for all document types, use `pip install "unstructured[all-docs]"`.
-- Install the following system dependencies if they are not already available on your system with e.g. `brew install` for Mac.
-  Depending on what document types you're parsing, you may not need all of these.
-    - `libmagic-dev` (filetype detection)
-    - `poppler-utils` (images and PDFs)
-    - `tesseract-ocr`(images and PDFs)
-    - `qpdf` (PDFs)
-    - `libreoffice` (MS Office docs)
-    - `pandoc` (EPUBs)
-- When running locally, Unstructured also recommends using Docker [by following this
-  guide](https://docs.unstructured.io/open-source/installation/docker-installation) to ensure all
-  system dependencies are installed correctly.
-
-The Unstructured API requires API keys to make requests.
-You can request an API key [here](https://unstructured.io/api-key-hosted) and start using it today!
-Check out the README [here](https://github.com/Unstructured-IO/unstructured-api) to get started making API calls.
-We'd love to hear your feedback; let us know how it goes in our [community Slack](https://join.slack.com/t/unstructuredw-kbe4326/shared_invite/zt-1x7cgo0pg-PTptXWylzPQF9xZolzCnwQ).
-And stay tuned for improvements to both quality and performance!
-Check out the instructions
-[here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you'd like to self-host the Unstructured API or run it locally.
-
-
-## Data Loaders
-
-The primary usage of `Unstructured` is in data loaders.
-
-### UnstructuredLoader
-
-See a [usage example](/docs/integrations/document_loaders/unstructured_file) to see how you can use
-this loader for both partitioning locally and remotely with the serverless Unstructured API.
-
-```python
-from langchain_unstructured import UnstructuredLoader
-```
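-
-As a rough sketch, the same loader can partition a file locally or remotely via the serverless API; the file path and API key below are placeholders:
-
-```python
-from langchain_unstructured import UnstructuredLoader
-
-# Local partitioning (requires the open-source `unstructured` package)
-local_docs = UnstructuredLoader("example.pdf").load()
-
-# Remote partitioning against the Unstructured API (placeholder API key)
-api_loader = UnstructuredLoader(
-    "example.pdf",
-    partition_via_api=True,
-    api_key="YOUR_UNSTRUCTURED_API_KEY",
-)
-api_docs = api_loader.load()
-```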
-
-### UnstructuredCHMLoader
-
-`CHM` means `Microsoft Compiled HTML Help`.
-
-```python
-from langchain_community.document_loaders import UnstructuredCHMLoader
-```
-
-### UnstructuredCSVLoader
-
-A `comma-separated values` (`CSV`) file is a delimited text file that uses 
-a comma to separate values. Each line of the file is a data record. 
-Each record consists of one or more fields, separated by commas.
-
-See a [usage example](/docs/integrations/document_loaders/csv#unstructuredcsvloader).
-
-```python
-from langchain_community.document_loaders import UnstructuredCSVLoader
-```
-
-### UnstructuredEmailLoader
-
-See a [usage example](/docs/integrations/document_loaders/email).
-
-```python
-from langchain_community.document_loaders import UnstructuredEmailLoader
-```
-
-### UnstructuredEPubLoader
-
-[EPUB](https://en.wikipedia.org/wiki/EPUB) is an `e-book file format` that uses 
-the “.epub” file extension. The term is short for electronic publication and 
-is sometimes styled `ePub`. `EPUB` is supported by many e-readers, and compatible 
-software is available for most smartphones, tablets, and computers.
-
-See a [usage example](/docs/integrations/document_loaders/epub).
-
-```python
-from langchain_community.document_loaders import UnstructuredEPubLoader
-```
-
-### UnstructuredExcelLoader
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_excel).
-
-```python
-from langchain_community.document_loaders import UnstructuredExcelLoader
-```
-
-### UnstructuredFileIOLoader
-
-See a [usage example](/docs/integrations/document_loaders/google_drive#passing-in-optional-file-loaders).
-
-```python
-from langchain_community.document_loaders import UnstructuredFileIOLoader
-```
-
-### UnstructuredHTMLLoader
-
-See a [usage example](/docs/how_to/document_loader_html).
-
-```python
-from langchain_community.document_loaders import UnstructuredHTMLLoader
-```
-
-### UnstructuredImageLoader
-
-See a [usage example](/docs/integrations/document_loaders/image).
-
-```python
-from langchain_community.document_loaders import UnstructuredImageLoader
-```
-
-### UnstructuredMarkdownLoader
-
-See a [usage example](/docs/integrations/vectorstores/starrocks).
-
-```python
-from langchain_community.document_loaders import UnstructuredMarkdownLoader
-```
-
-### UnstructuredODTLoader
-
-The `Open Document Format for Office Applications (ODF)`, also known as `OpenDocument`, 
-is an open file format for word processing documents, spreadsheets, presentations 
-and graphics and using ZIP-compressed XML files. It was developed with the aim of 
-providing an open, XML-based file format specification for office applications.
-
-See a [usage example](/docs/integrations/document_loaders/odt).
-
-```python
-from langchain_community.document_loaders import UnstructuredODTLoader
-```
-
-### UnstructuredOrgModeLoader
-
-An [Org Mode](https://en.wikipedia.org/wiki/Org-mode) document is a document editing, formatting, and organizing mode, designed for notes, planning, and authoring within the free software text editor Emacs.
-
-See a [usage example](/docs/integrations/document_loaders/org_mode).
-
-```python
-from langchain_community.document_loaders import UnstructuredOrgModeLoader
-```
-
-### UnstructuredPDFLoader
-
-See a [usage example](/docs/how_to/document_loader_pdf/#layout-analysis-and-extraction-of-text-from-images).
-
-```python
-from langchain_community.document_loaders import UnstructuredPDFLoader
-```
-
-### UnstructuredPowerPointLoader
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_powerpoint).
-
-```python
-from langchain_community.document_loaders import UnstructuredPowerPointLoader
-```
-
-### UnstructuredRSTLoader
-
-A `reStructuredText` (`RST`) file is a file format for textual data 
-used primarily in the Python programming language community for technical documentation.
-
-See a [usage example](/docs/integrations/document_loaders/rst).
-
-```python
-from langchain_community.document_loaders import UnstructuredRSTLoader
-```
-
-### UnstructuredRTFLoader
-
-See a usage example in the API documentation.
-
-```python
-from langchain_community.document_loaders import UnstructuredRTFLoader
-```
-
-### UnstructuredTSVLoader
-
-A `tab-separated values` (`TSV`) file is a simple, text-based file format for storing tabular data.
-Records are separated by newlines, and values within a record are separated by tab characters.
-
-See a [usage example](/docs/integrations/document_loaders/tsv).
-
-```python
-from langchain_community.document_loaders import UnstructuredTSVLoader
-```
-
-### UnstructuredURLLoader
-
-See a [usage example](/docs/integrations/document_loaders/url).
-
-```python
-from langchain_community.document_loaders import UnstructuredURLLoader
-```
-
-### UnstructuredWordDocumentLoader
-
-See a [usage example](/docs/integrations/document_loaders/microsoft_word#using-unstructured).
-
-```python
-from langchain_community.document_loaders import UnstructuredWordDocumentLoader
-```
-
-### UnstructuredXMLLoader
-
-See a [usage example](/docs/integrations/document_loaders/xml).
-
-```python
-from langchain_community.document_loaders import UnstructuredXMLLoader
-```
-
diff --git a/langchain_md_files/integrations/providers/upstash.mdx b/langchain_md_files/integrations/providers/upstash.mdx
deleted file mode 100644
index d1bfa783c230cb5045c1742642e3cfc57d527f14..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/upstash.mdx
+++ /dev/null
@@ -1,221 +0,0 @@
-Upstash offers developers serverless databases and messaging
-platforms to build powerful applications without having to worry 
-about the operational complexity of running databases at scale.
-
-One significant advantage of Upstash is that their databases support HTTP and all of their SDKs use HTTP.
-This means that you can run them on serverless platforms, at the edge, or on any platform that does not support TCP connections.
-
-Currently, there are two Upstash integrations available for LangChain: 
-Upstash Vector as a vector embedding database and Upstash Redis as a cache and memory store.
-
-# Upstash Vector
-
-Upstash Vector is a serverless vector database that can be used to store and query vectors.
-
-## Installation
-
-Create a new serverless vector database at the [Upstash Console](https://console.upstash.com/vector).
-Select your preferred distance metric and dimension count according to your model.
-
-
-Install the Upstash Vector Python SDK with `pip install upstash-vector`.
-The Upstash Vector integration in langchain is a wrapper for the Upstash Vector Python SDK. That's why the `upstash-vector` package is required.
-
-## Integrations
-
-Create a `UpstashVectorStore` object using credentials from the Upstash Console.
-You also need to pass in an `Embeddings` object which can turn text into vector embeddings.
-
-```python
-from langchain_community.vectorstores.upstash import UpstashVectorStore
-import os
-
-os.environ["UPSTASH_VECTOR_REST_URL"] = "<UPSTASH_VECTOR_REST_URL>"
-os.environ["UPSTASH_VECTOR_REST_TOKEN"] = "<UPSTASH_VECTOR_REST_TOKEN>"
-
-store = UpstashVectorStore(
-    embedding=embeddings
-)
-```
-
-An alternative way of creating an `UpstashVectorStore` is to pass `embedding=True`. This is a unique
-feature of the `UpstashVectorStore`, thanks to the ability of Upstash Vector indexes
-to have an associated embedding model. In this configuration, documents we want to insert or
-queries we want to search for are simply sent to Upstash Vector as text. In the background,
-Upstash Vector embeds these texts and executes the request with those embeddings. To use this
-feature, [create an Upstash Vector index by selecting a model](https://upstash.com/docs/vector/features/embeddingmodels#using-a-model)
-and simply pass `embedding=True`:
-
-```python
-from langchain_community.vectorstores.upstash import UpstashVectorStore
-import os
-
-os.environ["UPSTASH_VECTOR_REST_URL"] = "<UPSTASH_VECTOR_REST_URL>"
-os.environ["UPSTASH_VECTOR_REST_TOKEN"] = "<UPSTASH_VECTOR_REST_TOKEN>"
-
-store = UpstashVectorStore(
-    embedding=True
-)
-```
-
-See [Upstash Vector documentation](https://upstash.com/docs/vector/features/embeddingmodels)
-for more detail on embedding models.
-
-## Namespaces
-You can use namespaces to partition your data in the index. Namespaces are useful when you want to query over a huge amount of data and want to partition it to make queries faster. When you use namespaces, there won't be post-filtering on the results, which makes the query results more precise.
-
-```python
-from langchain_community.vectorstores.upstash import UpstashVectorStore
-import os
-
-os.environ["UPSTASH_VECTOR_REST_URL"] = "<UPSTASH_VECTOR_REST_URL>"
-os.environ["UPSTASH_VECTOR_REST_TOKEN"] = "<UPSTASH_VECTOR_REST_TOKEN>"
-
-store = UpstashVectorStore(
-    embedding=embeddings,
-    namespace="my_namespace"
-)
-```
-
-### Inserting Vectors
-
-```python
-from langchain.text_splitter import CharacterTextSplitter
-from langchain_community.document_loaders import TextLoader
-from langchain_openai import OpenAIEmbeddings
-
-loader = TextLoader("../../modules/state_of_the_union.txt")
-documents = loader.load()
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-docs = text_splitter.split_documents(documents)
-
-# Create a new embeddings object
-embeddings = OpenAIEmbeddings()
-
-# Create a new UpstashVectorStore object
-store = UpstashVectorStore(
-    embedding=embeddings
-)
-
-# Insert the document embeddings into the store
-store.add_documents(docs)
-```
-
-When inserting documents, they are first embedded using the `Embeddings` object.
-
-Most embedding models can embed multiple documents at once, so the documents are batched and embedded in parallel.
-The size of the batch can be controlled using the `embedding_chunk_size` parameter.
-
-The embedded vectors are then stored in the Upstash Vector database. When they are sent, multiple vectors are batched together to reduce the number of HTTP requests.
-The size of the batch can be controlled using the `batch_size` parameter. Upstash Vector has a limit of 1000 vectors per batch in the free tier.
-
-```python
-store.add_documents(
-    documents,
-    batch_size=100,
-    embedding_chunk_size=200
-)
-```
-
-### Querying Vectors
-
-Vectors can be queried using a text query or another vector.
-
-The returned value is a list of Document objects.
-
-```python
-result = store.similarity_search(
-    "The United States of America",
-    k=5
-)
-```
-
-Or using a vector:
-
-```python
-vector = embeddings.embed_query("Hello world")
-
-result = store.similarity_search_by_vector(
-    vector,
-    k=5
-)
-```
-
-When searching, you can also utilize the `filter` parameter which will allow you to filter by metadata:
-
-```python
-result = store.similarity_search(
-    "The United States of America",
-    k=5,
-    filter="type = 'country'"
-)
-```
-
-See [Upstash Vector documentation](https://upstash.com/docs/vector/features/filtering)
-for more details on metadata filtering.
-
-### Deleting Vectors
-
-Vectors can be deleted by their IDs.
-
-```python
-store.delete(["id1", "id2"])
-```
-
-### Getting information about the store
-
-You can get information about your database, such as the distance metric and dimension, using the `info` function.
-
-When an insert happens, the database indexes the new vectors. While this is happening, new vectors cannot be queried. `pendingVectorCount` represents the number of vectors that are currently being indexed.
-
-```python
-info = store.info()
-print(info)
-
-# Output:
-# {'vectorCount': 44, 'pendingVectorCount': 0, 'indexSize': 2642412, 'dimension': 1536, 'similarityFunction': 'COSINE'}
-```
-
-# Upstash Redis
-
-This page covers how to use [Upstash Redis](https://upstash.com/redis) with LangChain.
-
-## Installation and Setup
-- Upstash Redis Python SDK can be installed with `pip install upstash-redis`
-- A globally distributed, low-latency and highly available database can be created at the [Upstash Console](https://console.upstash.com)
-
-
-## Integrations
-All Upstash-LangChain integrations are based on the `upstash-redis` Python SDK, which is wrapped for LangChain.
-This SDK connects to the Upstash Redis database using the `UPSTASH_REDIS_REST_URL` and `UPSTASH_REDIS_REST_TOKEN` parameters from the console.
-
-
-### Cache
-
-[Upstash Redis](https://upstash.com/redis) can be used as a cache for LLM prompts and responses.
-
-To import this cache:
-```python
-from langchain.cache import UpstashRedisCache
-```
-
-To use with your LLMs:
-```python
-import langchain
-from upstash_redis import Redis
-
-URL = "<UPSTASH_REDIS_REST_URL>"
-TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
-
-langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN))
-```
-
-### Memory
-
-See a [usage example](/docs/integrations/memory/upstash_redis_chat_message_history).
-
-```python
-from langchain_community.chat_message_histories import (
-    UpstashRedisChatMessageHistory,
-)
-```
diff --git a/langchain_md_files/integrations/providers/usearch.mdx b/langchain_md_files/integrations/providers/usearch.mdx
deleted file mode 100644
index cdbc99ecc9094772f693c0ce6a815f2d0eafd0ab..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/usearch.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-# USearch
->[USearch](https://unum-cloud.github.io/usearch/) is a Smaller & Faster Single-File Vector Search Engine.
-
->`USearch's` base functionality is identical to `FAISS`, and the interface should look 
-> familiar if you have ever investigated Approximate Nearest Neighbors search. 
-> `USearch` and `FAISS` both employ `HNSW` algorithm, but they differ significantly 
-> in their design principles. `USearch` is compact and broadly compatible with FAISS without 
-> sacrificing performance, with a primary focus on user-defined metrics and fewer dependencies.
-> 
-## Installation and Setup
-
-We need to install the `usearch` Python package.
-
-```bash
-pip install usearch
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/usearch).
-
-```python
-from langchain_community.vectorstores import USearch
-```
-
diff --git a/langchain_md_files/integrations/providers/valthera.mdx b/langchain_md_files/integrations/providers/valthera.mdx
deleted file mode 100644
index 7a7f56963a0fdca16d566c37a70b9f4decde3480..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/valthera.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
-# Valthera
-
-> [Valthera](https://github.com/valthera/valthera) is an open-source framework that empowers LLM Agents to drive meaningful, context-aware user engagement. It evaluates user motivation and ability in real time, ensuring that notifications and actions are triggered only when users are most receptive.
-> 
-> **langchain-valthera** integrates Valthera with LangChain, enabling developers to build smarter, behavior-driven engagement systems that deliver personalized interactions.
-
-## Installation and Setup
-
-### Install langchain-valthera
-
-Install the LangChain Valthera package via pip:
-
-```bash
-pip install -U langchain-valthera
-```
-
-Import the ValtheraTool:
-
-```python
-from langchain_valthera.tools import ValtheraTool
-```
-
-### Example: Initializing the ValtheraTool for LangChain
-
-This example shows how to initialize the ValtheraTool using a `DataAggregator` and configuration for motivation and ability scoring.
-
-```python
-import os
-from langchain_openai import ChatOpenAI
-from valthera.aggregator import DataAggregator
-from mocks import hubspot, posthog, snowflake  # Replace these with your actual connector implementations
-from langchain_valthera.tools import ValtheraTool
-
-# Initialize the DataAggregator with your data connectors
-data_aggregator = DataAggregator(
-    connectors={
-        "hubspot": hubspot(),
-        "posthog": posthog(),
-        "app_db": snowflake()
-    }
-)
-
-# Initialize the ValtheraTool with your scoring configurations
-valthera_tool = ValtheraTool(
-    data_aggregator=data_aggregator,
-    motivation_config=[
-        {"key": "hubspot_lead_score", "weight": 0.30, "transform": lambda x: min(x, 100) / 100.0},
-        {"key": "posthog_events_count_past_30days", "weight": 0.30, "transform": lambda x: min(x, 50) / 50.0},
-        {"key": "hubspot_marketing_emails_opened", "weight": 0.20, "transform": lambda x: min(x / 10.0, 1.0)},
-        {"key": "posthog_session_count", "weight": 0.20, "transform": lambda x: min(x / 5.0, 1.0)}
-    ],
-    ability_config=[
-        {"key": "posthog_onboarding_steps_completed", "weight": 0.30, "transform": lambda x: min(x / 5.0, 1.0)},
-        {"key": "posthog_session_count", "weight": 0.30, "transform": lambda x: min(x / 10.0, 1.0)},
-        {"key": "behavior_complexity", "weight": 0.40, "transform": lambda x: 1 - (min(x, 5) / 5.0)}
-    ]
-)
-
-print("✅ ValtheraTool successfully initialized for LangChain integration!")
-```
-
-
-The langchain-valthera integration allows you to assess user behavior and decide on the best course of action for engagement, ensuring that interactions are both timely and relevant within your LangChain applications.
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/vdms.mdx b/langchain_md_files/integrations/providers/vdms.mdx
deleted file mode 100644
index b1b02217741a0e00831abcef25a86dbf34f5533b..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/vdms.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
-# VDMS
-
-> [VDMS](https://github.com/IntelLabs/vdms/blob/master/README.md) is a storage solution for efficient access
-> of big-"visual"-data that aims to achieve cloud scale by searching for relevant visual data via visual metadata
-> stored as a graph and enabling machine friendly enhancements to visual data for faster access.
-
-## Installation and Setup
-
-### Install Client
-
-```bash
-pip install langchain-vdms
-```
-
-### Install Database
-
-There are two ways to get started with VDMS:
-
-
-1. Install VDMS on your local machine via docker
-    ```bash
-        docker run -d -p 55555:55555 intellabs/vdms:latest
-    ```
-
-2. Install VDMS directly on your local machine. Please see
-[installation instructions](https://github.com/IntelLabs/vdms/blob/master/INSTALL.md).
-
-## VectorStore
-
-To import this vectorstore:
-
-```python
-from langchain_vdms import VDMS
-from langchain_vdms.vectorstores import VDMS
-```
-
-To import the VDMS Client connector:
-
-```python
-from langchain_vdms.vectorstores import VDMS_Client
-```
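-
-As a rough sketch, the client connects to the dockerized VDMS instance started above and backs the vector store; the collection name and embedding model are placeholders, and the constructor is assumed to mirror the community integration:
-
-```python
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain_vdms.vectorstores import VDMS, VDMS_Client
-
-# Connect to the VDMS server started with the docker command above
-client = VDMS_Client(host="localhost", port=55555)
-
-vector_store = VDMS(
-    client=client,
-    embedding=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
-    collection_name="my_collection",
-)
-```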
-
-For a more detailed walkthrough of the VDMS wrapper, see [this guide](/docs/integrations/vectorstores/vdms).
diff --git a/langchain_md_files/integrations/providers/vectara/index.mdx b/langchain_md_files/integrations/providers/vectara/index.mdx
deleted file mode 100644
index 10de5f001f11285d7205600223c4c98a8d73029c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/vectara/index.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
-# Vectara
-
->[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant) 
-> which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service).
-
-**Vectara Overview:**
-[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.
-Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:
-1. A way to extract text from files (PDF, PPT, DOCX, etc)
-2. ML-based chunking that provides state of the art performance.
-3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.
-4. Its own internal vector database where text chunks and embedding vectors are stored.
-5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). 
-6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.
-
-For more information:
-- [Documentation](https://docs.vectara.com/docs/)
-- [API Playground](https://docs.vectara.com/docs/rest-api/)
-- [Quickstart](https://docs.vectara.com/docs/quickstart)
-
-## Installation and Setup
-
-To use `Vectara` with LangChain no special installation steps are required. 
-To get started, [sign up](https://vectara.com/integrations/langchain) for a free Vectara trial,
-and follow the [quickstart](https://docs.vectara.com/docs/quickstart) guide to create a corpus and an API key. 
-Once you have these, you can provide them as arguments to the Vectara `vectorstore`, or you can set them as environment variables.
-
-- export `VECTARA_CUSTOMER_ID`="your_customer_id"
-- export `VECTARA_CORPUS_ID`="your_corpus_id"
-- export `VECTARA_API_KEY`="your-vectara-api-key"
-
-## Vectara as a Vector Store
-
-There exists a wrapper around the Vectara platform, allowing you to use it as a `vectorstore` in LangChain:
-
-To import this vectorstore:
-```python
-from langchain_community.vectorstores import Vectara
-```
-
-To create an instance of the Vectara vectorstore:
-```python
-vectara = Vectara(
-    vectara_customer_id=customer_id, 
-    vectara_corpus_id=corpus_id, 
-    vectara_api_key=api_key
-)
-```
-The `customer_id`, `corpus_id`, and `api_key` are optional; if they are not supplied, they will be read from 
-the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID`, and `VECTARA_API_KEY`, respectively.
-
-### Adding Texts or Files
-
-After you have the vectorstore, you can `add_texts` or `add_documents` as per the standard `VectorStore` interface, for example:
-
-```python
-vectara.add_texts(["to be or not to be", "that is the question"])
-```
-
-Since Vectara supports file upload in the platform, we also added the ability to upload files (PDF, TXT, HTML, PPT, DOC, etc.) directly.
-When using this method, each file is uploaded directly to the Vectara backend, where it is processed and chunked optimally, so you don't have to use the LangChain document loader or chunking mechanism.
-
-As an example:
-
-```python
-vectara.add_files(["path/to/file1.pdf", "path/to/file2.pdf",...])
-```
-
-Of course, you do not have to add any data; you can instead just connect to an existing Vectara corpus where data may already be indexed.
-
-### Querying the VectorStore
-
-To query the Vectara vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
-```python
-results = vectara.similarity_search_with_score("what is LangChain?")
-```
-The results are returned as a list of relevant documents, along with a relevance score for each document.
-
-In this case, we used the default retrieval parameters, but you can also specify the following additional arguments in `similarity_search` or `similarity_search_with_score`:
-- `k`: number of results to return (defaults to 5)
-- `lambda_val`: the [lexical matching](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) factor for hybrid search (defaults to 0.025)
-- `filter`: a [filter](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview) to apply to the results (default None)
-- `n_sentence_context`: number of sentences to include before/after the actual matching segment when returning results. This defaults to 2.
-- `rerank_config`: can be used to specify a reranker for the results
-   - `reranker`: `mmr`, `rerank_multilingual_v1`, or `none`. Note that `rerank_multilingual_v1` is a Scale-only feature
-   - `rerank_k`: number of results to use for reranking
-   - `mmr_diversity_bias`: 0 = no diversity, 1 = full diversity. This is the lambda parameter in the MMR formula and is in the range 0...1
-
-To get results without the relevance score, you can simply use the `similarity_search` method:
-```python   
-results = vectara.similarity_search("what is LangChain?")
-```
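-
-For illustration, the optional arguments listed above can be combined in a single call. This is a minimal sketch; the filter expression is a placeholder and assumes your corpus documents carry a `type` metadata field:
-
-```python
-results = vectara.similarity_search_with_score(
-    "what is LangChain?",
-    k=3,                              # return the top 3 results
-    n_sentence_context=1,             # include 1 sentence before/after each matching segment
-    filter="doc.type = 'manual'",     # placeholder metadata filter expression
-)
-```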
-
-## Vectara for Retrieval Augmented Generation (RAG)
-
-Vectara provides a full RAG pipeline, including generative summarization. To use it as a complete RAG solution, you can use the `as_rag` method.
-There are a few additional parameters that can be specified in the `VectaraQueryConfig` object to control retrieval and summarization:
-* `k`: number of results to return
-* `lambda_val`: the lexical matching factor for hybrid search
-* `summary_config` (optional): can be used to request an LLM summary in RAG
-   - `is_enabled`: True or False
-   - `max_results`: number of results to use for summary generation
-   - `response_lang`: language of the response summary, in ISO 639-2 format (e.g. 'eng', 'fra', 'deu', etc.)
-* `rerank_config` (optional): can be used to specify a Vectara reranker for the results
-   - `reranker`: `mmr`, `rerank_multilingual_v1`, or `none`
-   - `rerank_k`: number of results to use for reranking
-   - `mmr_diversity_bias`: 0 = no diversity, 1 = full diversity. 
-     This is the lambda parameter in the MMR formula and is in the range 0...1
-
-For example:
-
-```python
-from langchain_community.vectorstores.vectara import RerankConfig, SummaryConfig, VectaraQueryConfig
-
-summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
-rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
-config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
-```
-Then you can use the `as_rag` method to create a RAG pipeline:
-
-```python
-query_str = "what did Biden say?"
-
-rag = vectara.as_rag(config)
-rag.invoke(query_str)['answer']
-```
-
-The `as_rag` method returns a `VectaraRAG` object, which behaves just like any LangChain Runnable, including the `invoke` or `stream` methods.
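-
-Since the returned object is a standard Runnable, streaming can be sketched as follows (a minimal sketch; the exact chunk payload may vary, so it is printed as-is):
-
-```python
-rag = vectara.as_rag(config)
-
-for chunk in rag.stream(query_str):
-    print(chunk, end="", flush=True)
-```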
-
-## Vectara Chat
-
-The RAG functionality can be used to create a chatbot. For example, you can create a simple chatbot that responds to user input:
-
-```python
-summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
-rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
-config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
-
-query_str = "what did Biden say?"
-bot = vectara.as_chat(config)
-bot.invoke(query_str)['answer']
-```
-
-The main difference is that with `as_chat`, Vectara internally tracks the chat history and conditions each response on the full chat history.
-There is no need to keep that history locally in LangChain, as Vectara manages it internally.
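-
-As a minimal sketch, follow-up questions can then rely on the history that Vectara keeps for the chat session:
-
-```python
-bot = vectara.as_chat(config)
-
-print(bot.invoke("what did Biden say?")['answer'])
-print(bot.invoke("did he mention the economy?")['answer'])  # answered using the stored chat history
-```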
-
-## Vectara as a LangChain retriever only
-
-If you want to use Vectara as a retriever only, you can use the `as_retriever` method, which returns a `VectaraRetriever` object.
-```python
-retriever = vectara.as_retriever(config=config)
-retriever.invoke(query_str)
-```
-
-Like with `as_rag`, you provide a `VectaraQueryConfig` object to control the retrieval parameters.
-In most cases you would not enable the `summary_config`, but it is left as an option for backwards compatibility.
-If no summary is requested, the response will be a list of relevant documents, each with a relevance score.
-If a summary is requested, the response will be a list of relevant documents as before, plus an additional document that includes the generative summary.
-
-## Hallucination Detection score
-
-Vectara created [HHEM](https://huggingface.co/vectara/hallucination_evaluation_model) - an open-source model that can be used to evaluate RAG responses for factual consistency.
-As part of the Vectara RAG, the "Factual Consistency Score" (or FCS), which is an improved version of the open-source HHEM, is made available via the API.
-This is automatically included in the output of the RAG pipeline:
-
-```python
-summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
-rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
-config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
-
-rag = vectara.as_rag(config)
-resp = rag.invoke(query_str)
-print(resp['answer'])
-print(f"Vectara FCS = {resp['fcs']}")
-```
-
-## Example Notebooks
-
-For more detailed examples of using Vectara with LangChain, see the following example notebooks:
-* [this notebook](/docs/integrations/vectorstores/vectara) shows how to use Vectara: with full RAG or just as a retriever.
-* [this notebook](/docs/integrations/retrievers/self_query/vectara_self_query) shows the self-query capability with Vectara.
-* [this notebook](/docs/integrations/providers/vectara/vectara_chat) shows how to build a chatbot with LangChain and Vectara.
-
diff --git a/langchain_md_files/integrations/providers/vespa.mdx b/langchain_md_files/integrations/providers/vespa.mdx
deleted file mode 100644
index 7796fde96d78c23533c3382f4b60ea929dd4e16d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/vespa.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Vespa
-
->[Vespa](https://vespa.ai/) is a fully featured search engine and vector database. 
-> It supports vector search (ANN), lexical search, and search in structured data, all in the same query.
- 
-## Installation and Setup
-
-
-```bash
-pip install pyvespa
-```
-
-
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/vespa).
-
-```python
-from langchain.retrievers import VespaRetriever
-```
diff --git a/langchain_md_files/integrations/providers/vlite.mdx b/langchain_md_files/integrations/providers/vlite.mdx
deleted file mode 100644
index 6599dec720110beb60c68dcb49821e827bc5d3f2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/vlite.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# vlite
-
-This page covers how to use [vlite](https://github.com/sdan/vlite) within LangChain. vlite is a simple and fast vector database for storing and retrieving embeddings.
-
-## Installation and Setup
-
-To install vlite, run the following command:
-
-```bash
-pip install vlite
-```
-
-For PDF OCR support, install the `vlite[ocr]` extra:
-
-```bash
-pip install vlite[ocr]
-```
-
-## VectorStore
-
-vlite provides a wrapper around its vector database, allowing you to use it as a vectorstore for semantic search and example selection.
-
-To import the vlite vectorstore:
-
-```python
-from langchain_community.vectorstores import vlite
-```
-
-### Usage
-
-For a more detailed walkthrough of the vlite wrapper, see [this notebook](/docs/integrations/vectorstores/vlite).
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/voyageai.mdx b/langchain_md_files/integrations/providers/voyageai.mdx
deleted file mode 100644
index d40cb69bedf6940bdce1181d35adee0860a4148f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/voyageai.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
-# VoyageAI
-
-All functionality related to VoyageAI
-
->[Voyage AI](https://www.voyageai.com/) builds embedding models, customized for your domain and company, for better retrieval quality.
-
-## Installation and Setup
-
-Install the integration package with
-```bash
-pip install langchain-voyageai
-```
-
-Get a VoyageAI API key and set it as an environment variable (`VOYAGE_API_KEY`)
-
-
-## Text Embedding Model
-
-See a [usage example](/docs/integrations/text_embedding/voyageai)
-
-```python
-from langchain_voyageai import VoyageAIEmbeddings
-```
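-
-A minimal sketch (the model name is illustrative; check Voyage AI's documentation for available models, and set `VOYAGE_API_KEY` first):
-
-```python
-from langchain_voyageai import VoyageAIEmbeddings
-
-embeddings = VoyageAIEmbeddings(model="voyage-3")  # model name is illustrative
-vector = embeddings.embed_query("Hello, world!")
-```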
-
-
-## Reranking
-
-See a [usage example](/docs/integrations/document_transformers/voyageai-reranker)
-
-```python
-from langchain_voyageai import VoyageAIRerank
-```
diff --git a/langchain_md_files/integrations/providers/wandb.mdx b/langchain_md_files/integrations/providers/wandb.mdx
deleted file mode 100644
index 8eb85116ff02fc59d2af7b5112495dff46c44b4f..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/wandb.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
-# Weights & Biases
-
->[Weights & Biases](https://wandb.ai/) is a provider of the AI developer platform to train and 
-> fine-tune AI models and develop AI applications.
- 
-`Weights & Biases` products can be used to log metrics and artifacts during training, 
-and to trace the execution of your code.
-
-There are several main ways to use `Weights & Biases` products within LangChain:
-- with `wandb_tracing_enabled`
-- with the `Weave` lightweight toolkit
-- with `WandbCallbackHandler` (deprecated)
-
-
-## wandb_tracing_enabled
-
-See a [usage example](/docs/integrations/providers/wandb_tracing).
-
-See the [W&B documentation](https://docs.wandb.ai/guides/integrations/langchain).
-
-```python
-from langchain_community.callbacks import wandb_tracing_enabled
-```
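-
-A minimal sketch of scoping tracing to a block of code (assumes `agent` is an existing LangChain agent; only calls made inside the context manager are traced):
-
-```python
-from langchain_community.callbacks import wandb_tracing_enabled
-
-# `agent` is assumed to be an already-constructed LangChain agent
-with wandb_tracing_enabled():
-    agent.run("What is 2 raised to the 0.123243 power?")
-```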
-
-## Weave
-
-See the [W&B documentation](https://weave-docs.wandb.ai/guides/integrations/langchain).
-
-
-## WandbCallbackHandler
-
-**Note:** the `WandbCallbackHandler` is being deprecated in favour of `wandb_tracing_enabled`.
-
-See a [usage example](/docs/integrations/providers/wandb_tracking).
-
-See the [W&B documentation](https://docs.wandb.ai/guides/integrations/langchain).
-
-```python
-from langchain_community.callbacks import WandbCallbackHandler
-```
-
-
diff --git a/langchain_md_files/integrations/providers/weather.mdx b/langchain_md_files/integrations/providers/weather.mdx
deleted file mode 100644
index 199af6ccb9772058fbc34f749147c37f2b58f62d..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/weather.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Weather
-
->[OpenWeatherMap](https://openweathermap.org/) is an open-source weather service provider.
-
-
-
-## Installation and Setup
-
-```bash
-pip install pyowm
-```
-
-We must set up an `OpenWeatherMap` API token.
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/weather).
-
-```python
-from langchain_community.document_loaders import WeatherDataLoader
-```
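-
-A minimal sketch of using the loader (the city names are illustrative, and an OpenWeatherMap API key is required):
-
-```python
-from langchain_community.document_loaders import WeatherDataLoader
-
-loader = WeatherDataLoader.from_params(
-    ["chennai", "vellore"],                                # cities to fetch weather for
-    openweathermap_api_key="your-openweathermap-api-key",  # placeholder API key
-)
-documents = loader.load()
-```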
diff --git a/langchain_md_files/integrations/providers/weaviate.mdx b/langchain_md_files/integrations/providers/weaviate.mdx
deleted file mode 100644
index 25041cbc2736883f72ba0070c49bb1e7250449c6..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/weaviate.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# Weaviate
-
->[Weaviate](https://weaviate.io/) is an open-source vector database. It allows you to store data objects and vector embeddings from
->your favorite ML models, and scale seamlessly into billions of data objects.
-
-
-What is `Weaviate`?
-- Weaviate is an open-source database of the vector search engine type.
-- Weaviate allows you to store JSON documents in a class property-like fashion while attaching machine learning vectors to these documents to represent them in vector space.
-- Weaviate can be used stand-alone (aka bring your vectors) or with a variety of modules that can do the vectorization for you and extend the core capabilities.
-- Weaviate has a GraphQL-API to access your data easily.
-- We aim to bring your vector search setup to production to query in mere milliseconds (check our [open-source benchmarks](https://weaviate.io/developers/weaviate/current/benchmarks/) to see if Weaviate fits your use case).
-- Get to know Weaviate in the [basics getting started guide](https://weaviate.io/developers/weaviate/current/core-knowledge/basics.html) in under five minutes.
-
-**Weaviate in detail:**
-
-`Weaviate` is a low-latency vector search engine with out-of-the-box support for different media types (text, images, etc.). It offers Semantic Search, Question-Answer Extraction, Classification, Customizable Models (PyTorch/TensorFlow/Keras), etc. Built from scratch in Go, Weaviate stores both objects and vectors, allowing for combining vector search with structured filtering and the fault tolerance of a cloud-native database. It is all accessible through GraphQL, REST, and various client-side programming languages.
-
-## Installation and Setup
-
-Install the Python SDK:
-
-```bash
-pip install langchain-weaviate
-```
-
-
-## Vector Store
-
-There exists a wrapper around `Weaviate` indexes, allowing you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-To import this vectorstore:
-```python
-from langchain_weaviate import WeaviateVectorStore
-```
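-
-As a minimal sketch (assuming a locally running Weaviate instance, the v4 `weaviate-client` SDK, and existing `docs` and `embeddings` objects):
-
-```python
-import weaviate
-from langchain_weaviate import WeaviateVectorStore
-
-client = weaviate.connect_to_local()  # connect to a local Weaviate instance
-db = WeaviateVectorStore.from_documents(docs, embeddings, client=client)
-results = db.similarity_search("example query", k=3)
-```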
-
-For a more detailed walkthrough of the Weaviate wrapper, see [this notebook](/docs/integrations/vectorstores/weaviate)
diff --git a/langchain_md_files/integrations/providers/whatsapp.mdx b/langchain_md_files/integrations/providers/whatsapp.mdx
deleted file mode 100644
index dbe45e1b865bf9049d2ac3d8214f850ced36b1f0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/whatsapp.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# WhatsApp
-
->[WhatsApp](https://www.whatsapp.com/) (also called `WhatsApp Messenger`) is a freeware, cross-platform, centralized instant messaging (IM) and voice-over-IP (VoIP) service. It allows users to send text and voice messages, make voice and video calls, and share images, documents, user locations, and other content.
-
-
-## Installation and Setup
-
-There isn't any special setup for it.
-
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/whatsapp_chat).
-
-```python
-from langchain_community.document_loaders import WhatsAppChatLoader
-```
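-
-A minimal sketch (the path to an exported chat file is a placeholder):
-
-```python
-from langchain_community.document_loaders import WhatsAppChatLoader
-
-loader = WhatsAppChatLoader("example_data/whatsapp_chat.txt")  # path to an exported chat
-docs = loader.load()
-```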
diff --git a/langchain_md_files/integrations/providers/wikipedia.mdx b/langchain_md_files/integrations/providers/wikipedia.mdx
deleted file mode 100644
index cf1b08a50a65f052141ebbebd4810572584820fe..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/wikipedia.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
-# Wikipedia
-
->[Wikipedia](https://wikipedia.org/) is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. `Wikipedia` is the largest and most-read reference work in history.
-
-
-## Installation and Setup
-
-```bash
-pip install wikipedia
-```
-
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/wikipedia).
-
-```python
-from langchain_community.document_loaders import WikipediaLoader
-```
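-
-A minimal sketch of loading a couple of pages (the query is illustrative):
-
-```python
-from langchain_community.document_loaders import WikipediaLoader
-
-docs = WikipediaLoader(query="LangChain", load_max_docs=2).load()
-```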
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/wikipedia).
-
-```python
-from langchain.retrievers import WikipediaRetriever
-```
diff --git a/langchain_md_files/integrations/providers/wolfram_alpha.mdx b/langchain_md_files/integrations/providers/wolfram_alpha.mdx
deleted file mode 100644
index f4c7ae3a2eb693d64e27e63ceb122a512a3c8599..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/wolfram_alpha.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-# Wolfram Alpha
-
->[WolframAlpha](https://en.wikipedia.org/wiki/WolframAlpha) is an answer engine developed by `Wolfram Research`. 
-> It answers factual queries by computing answers from externally sourced data.
-
-This page covers how to use the `Wolfram Alpha API` within LangChain.
-
-## Installation and Setup
-- Install requirements with 
-```bash
-pip install wolframalpha
-```
-- Go to Wolfram Alpha and sign up for a developer account [here](https://developer.wolframalpha.com/)
-- Create an app and get your `APP ID`
-- Set your APP ID as an environment variable `WOLFRAM_ALPHA_APPID`
-
-
-## Wrappers
-
-### Utility
-
-There exists a `WolframAlphaAPIWrapper` utility which wraps this API. To import this utility:
-
-```python
-from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
-```
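-
-A minimal sketch of calling the wrapper (assumes the `WOLFRAM_ALPHA_APPID` environment variable is set):
-
-```python
-from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
-
-wolfram = WolframAlphaAPIWrapper()
-print(wolfram.run("What is 2x+5 = -3x + 7?"))  # returns the computed answer as a string
-```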
-
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/wolfram_alpha).
-
-### Tool
-
-You can also easily load this wrapper as a Tool (to use with an Agent).
-You can do this with:
-```python
-from langchain.agents import load_tools
-tools = load_tools(["wolfram-alpha"])
-```
-
-For more information on tools, see [this page](/docs/how_to/tools_builtin).
diff --git a/langchain_md_files/integrations/providers/writer.mdx b/langchain_md_files/integrations/providers/writer.mdx
deleted file mode 100644
index 8da193c2e0721a9bac1612503f7f779279d18ce7..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/writer.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-keywords: [writer]
----
-
-# Writer, Inc.
-
-All functionality related to Writer
-
-
->This page covers how to use the [Writer](https://writer.com/) ecosystem within LangChain. For further information see Writer [docs](https://dev.writer.com/home/introduction).
->[Palmyra](https://writer.com/blog/palmyra/) is a Large Language Model (LLM) developed by `Writer, Inc`.
->
->The [Writer API](https://dev.writer.com/api-guides/introduction) is powered by a diverse set of Palmyra sub-models with different capabilities and price points.
-
-## Installation and Setup
-
-Install the integration package with
-```bash
-pip install langchain-writer
-```
-
-Get a Writer API key and set it as an environment variable (`WRITER_API_KEY`).
-
-## Chat model
-
-```python
-from langchain_writer import ChatWriter
-```
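-
-A minimal sketch (the model name and `model` parameter are assumptions; check the Writer docs for available Palmyra models, and set `WRITER_API_KEY` first):
-
-```python
-from langchain_writer import ChatWriter
-
-llm = ChatWriter(model="palmyra-x-004")  # model name is illustrative
-llm.invoke("Write a one-sentence product tagline.")
-```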
-
-## PDF Parser
-
-
-```python
-from langchain_writer.pdf_parser import PDFParser
-```
-
-## Text splitter
-
-```python
-from langchain_writer.text_splitter import WriterTextSplitter
-```
-
-## Tool calling
-
-### Functions
-
-Supports basic function calling, with functions defined via dicts, Pydantic models, Python functions, etc.
-
-### Graphs
-
-```python
-from langchain_writer.tools import GraphTool
-```
-
-A Writer-specific tool that is invoked remotely.
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/xata.mdx b/langchain_md_files/integrations/providers/xata.mdx
deleted file mode 100644
index 986468d63c7321a302eab2cb78e8f6f6b2a8b859..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/xata.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Xata
-
-> [Xata](https://xata.io) is a serverless data platform, based on `PostgreSQL`. 
-> It provides a Python SDK for interacting with your database, and a UI 
-> for managing your data.
-> `Xata` has a native vector type, which can be added to any table, and 
-> supports similarity search. LangChain inserts vectors directly to `Xata`, 
-> and queries it for the nearest neighbors of a given vector, so that you can
-> use all the LangChain Embeddings integrations with `Xata`.
-
-
-## Installation and Setup
-
-
-We need to install the `xata` Python package.
-
-```bash
-pip install xata==1.0.0a7 
-```
-
-## Vector Store
-
-See a [usage example](/docs/integrations/vectorstores/xata).
-
-```python
-from langchain_community.vectorstores import XataVectorStore
-```
-
-## Memory
-
-See a [usage example](/docs/integrations/memory/xata_chat_message_history).
-
-```python
-from langchain_community.chat_message_histories import XataChatMessageHistory
-```
-
diff --git a/langchain_md_files/integrations/providers/xinference.mdx b/langchain_md_files/integrations/providers/xinference.mdx
deleted file mode 100644
index 97f32b6c1a0a7f8f9c92f849bb54b20277827323..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/xinference.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
-# Xorbits Inference (Xinference)
-
-This page demonstrates how to use [Xinference](https://github.com/xorbitsai/inference)
-with LangChain.
-
-`Xinference` is a powerful and versatile library designed to serve LLMs, 
-speech recognition models, and multimodal models, even on your laptop. 
-With Xorbits Inference, you can effortlessly deploy and serve your own or 
-state-of-the-art built-in models using just a single command.
-
-## Installation and Setup
-
-Xinference can be installed via pip from PyPI: 
-
-```bash
-pip install "xinference[all]"
-```
-
-## LLM
-
-Xinference supports various models compatible with GGML, including chatglm, baichuan, whisper, 
-vicuna, and orca. To view the builtin models, run the command:
-
-```bash
-xinference list --all
-```
-
-
-### Wrapper for Xinference
-
-You can start a local instance of Xinference by running:
-
-```bash
-xinference
-```
-
-You can also deploy Xinference in a distributed cluster. To do so, first start an Xinference supervisor
-on the server where you want to run it:
-
-```bash
-xinference-supervisor -H "${supervisor_host}"
-```
-
-
-Then, start the Xinference workers on each of the other servers where you want to run them:
-
-```bash
-xinference-worker -e "http://${supervisor_host}:9997"
-```
-
-Once Xinference is running, an endpoint will be accessible for model management via CLI or 
-Xinference client. 
-
-For local deployment, the endpoint will be http://localhost:9997. 
-
-
-For cluster deployment, the endpoint will be http://$\{supervisor_host\}:9997.
-
-
-Then, you need to launch a model. You can specify the model names and other attributes,
-including `model_size_in_billions` and `quantization`. You can use the command-line interface (CLI) to
-do it. For example:
-
-```bash
-xinference launch -n orca -s 3 -q q4_0
-```
-
-A model uid will be returned.
-
-Example usage:
-
-```python
-from langchain_community.llms import Xinference
-
-llm = Xinference(
-    server_url="http://0.0.0.0:9997",
-    model_uid=model_uid,  # replace with the model UID returned when launching the model
-)
-
-llm(
-    prompt="Q: where can we visit in the capital of France? A:",
-    generate_config={"max_tokens": 1024, "stream": True},
-)
-
-```
-
-### Usage
-
-For more information and detailed examples, refer to the
-[example for xinference LLMs](/docs/integrations/llms/xinference)
-
-### Embeddings
-
-Xinference also supports embedding queries and documents. See
-[example for xinference embeddings](/docs/integrations/text_embedding/xinference) 
-for a more detailed demo.
\ No newline at end of file
diff --git a/langchain_md_files/integrations/providers/yahoo.mdx b/langchain_md_files/integrations/providers/yahoo.mdx
deleted file mode 100644
index 70023477f5597b4d09886e1607ebe642f281a825..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/yahoo.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-# Yahoo
-
->[Yahoo (Wikipedia)](https://en.wikipedia.org/wiki/Yahoo) is an American web services provider.
->
-> It provides a web portal, search engine Yahoo Search, and related 
-> services, including `My Yahoo`, `Yahoo Mail`, `Yahoo News`, 
-> `Yahoo Finance`, `Yahoo Sports` and its advertising platform, `Yahoo Native`.
-
-
-## Tools
-
-### Yahoo Finance News
-
-We have to install a Python package:
-
-```bash
-pip install yfinance
-```
-See a [usage example](/docs/integrations/tools/yahoo_finance_news).
-
-
-```python
-from langchain_community.tools import YahooFinanceNewsTool
-```
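-
-A minimal sketch of invoking the tool (the ticker symbol is illustrative):
-
-```python
-from langchain_community.tools import YahooFinanceNewsTool
-
-tool = YahooFinanceNewsTool()
-print(tool.invoke("NVDA"))  # recent Yahoo Finance news for the ticker
-```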
diff --git a/langchain_md_files/integrations/providers/yandex.mdx b/langchain_md_files/integrations/providers/yandex.mdx
deleted file mode 100644
index 40600d89f30732fcc1bbf049aa240993e415669c..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/yandex.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
-# Yandex
-
-All functionality related to Yandex Cloud
-
->[Yandex Cloud](https://cloud.yandex.com/en/) is a public cloud platform. 
-
-## Installation and Setup
-
-Yandex Cloud SDK can be installed via pip from PyPI: 
-
-```bash
-pip install yandexcloud
-```
-
-## LLMs
-
-### YandexGPT
-
-See a [usage example](/docs/integrations/llms/yandex).
-
-```python
-from langchain_community.llms import YandexGPT
-```
-
-## Chat models
-
-### YandexGPT
-
-See a [usage example](/docs/integrations/chat/yandex).
-
-```python
-from langchain_community.chat_models import ChatYandexGPT
-```
-
-## Embedding models
-
-### YandexGPT
-
-See a [usage example](/docs/integrations/text_embedding/yandex).
-
-```python
-from langchain_community.embeddings import YandexGPTEmbeddings
-```
-
-## Parser
-
-### YandexSTTParser
-
-It transcribes and parses audio files. 
-
-`YandexSTTParser` is similar to the `OpenAIWhisperParser`.
-See a [usage example with OpenAIWhisperParser](/docs/integrations/document_loaders/youtube_audio).
-
-```python
-from langchain_community.document_loaders import YandexSTTParser
-```
diff --git a/langchain_md_files/integrations/providers/yeagerai.mdx b/langchain_md_files/integrations/providers/yeagerai.mdx
deleted file mode 100644
index 6483cce900151cd054c250aaafd5fdc9886032cf..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/yeagerai.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
-# Yeager.ai
-
-This page covers how to use [Yeager.ai](https://yeager.ai) to generate LangChain tools and agents.
-
-## What is Yeager.ai?
-Yeager.ai is an ecosystem designed to simplify the process of creating AI agents and tools. 
-
-It features yAgents, a No-code LangChain Agent Builder, which enables users to build, test, and deploy AI solutions with ease. Leveraging the LangChain framework, yAgents allows seamless integration with various language models and resources, making it suitable for developers, researchers, and AI enthusiasts across diverse applications.
-
-## yAgents
-yAgents is a low-code generative agent designed to help you build, prototype, and deploy LangChain tools with ease.
-
-### How to use?
-```bash
-pip install yeagerai-agent
-yeagerai-agent
-```
-Go to http://127.0.0.1:7860
-
-This will install the necessary dependencies and set up yAgents on your system. After the first run, yAgents will create a .env file where you can input your OpenAI API key. You can do the same directly from the Gradio interface under the tab "Settings".
-
-`OPENAI_API_KEY=<your_openai_api_key_here>`
-
-We recommend using GPT-4. However, the tool can also work with GPT-3 if the problem is broken down sufficiently.
-
-### Creating and Executing Tools with yAgents
-yAgents makes it easy to create and execute AI-powered tools. Here's a brief overview of the process:
-1. Create a tool: To create a tool, provide a natural language prompt to yAgents. The prompt should clearly describe the tool's purpose and functionality. For example:
-`create a tool that returns the n-th prime number`
-
-2. Load the tool into the toolkit: To load a tool into yAgents, simply provide a command to yAgents that says so. For example:
-`load the tool that you just created into your toolkit`
-
-3. Execute the tool: To run a tool or agent, simply provide a command to yAgents that includes the name of the tool and any required parameters. For example:
-`generate the 50th prime number`
-
-You can see a video of how it works [here](https://www.youtube.com/watch?v=KA5hCM3RaWE).
-
-As you become more familiar with yAgents, you can create more advanced tools and agents to automate your work and enhance your productivity.
-
-For more information, see [yAgents' GitHub](https://github.com/yeagerai/yeagerai-agent) or our [docs](https://yeagerai.gitbook.io/docs/general/welcome-to-yeager.ai).
-
-
diff --git a/langchain_md_files/integrations/providers/yellowbrick.mdx b/langchain_md_files/integrations/providers/yellowbrick.mdx
deleted file mode 100644
index 911f1ab52503af2b9e43af3ab64b5a324e7842f2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/yellowbrick.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Yellowbrick
-
->[Yellowbrick](https://yellowbrick.com/) is a provider of an enterprise data warehouse 
-> for ad-hoc and streaming analytics, BI, and AI workloads.
-
-## Vector store
-
-We have to install a Python package:
-
-```bash
-pip install psycopg2
-```
-
-```python
-from langchain_community.vectorstores import Yellowbrick
-```
diff --git a/langchain_md_files/integrations/providers/yi.mdx b/langchain_md_files/integrations/providers/yi.mdx
deleted file mode 100644
index e26590ac82974d2559d74f4432a5a31222c1857a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/yi.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-# 01.AI
-
->[01.AI](https://www.lingyiwanwu.com/en), founded by Dr. Kai-Fu Lee, is a global company at the forefront of AI 2.0. They offer cutting-edge large language models, including the Yi series, which range from 6B to hundreds of billions of parameters. 01.AI also provides multimodal models, an open API platform, and open-source options like Yi-34B/9B/6B and Yi-VL.
-
-## Installation and Setup
-
-Register and get an API key from either the China site [here](https://platform.lingyiwanwu.com/apikeys) or the global site [here](https://platform.01.ai/apikeys).
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/yi).
-
-```python
-from langchain_community.llms import YiLLM
-```
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/yi).
-
-```python
-from langchain_community.chat_models import ChatYi
-```
diff --git a/langchain_md_files/integrations/providers/you.mdx b/langchain_md_files/integrations/providers/you.mdx
deleted file mode 100644
index eb088be77b89b8de65ea3e63c5c4ebc9d7c915d0..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/you.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# You
-
->The [You](https://you.com/about) company provides an AI productivity platform.
-
-## Retriever
-
-See a [usage example](/docs/integrations/retrievers/you-retriever).
-
-```python
-from langchain_community.retrievers.you import YouRetriever
-```
-
-## Tools
-
-See a [usage example](/docs/integrations/tools/you).
-
-```python
-from langchain_community.tools.you import YouSearchTool
-```
diff --git a/langchain_md_files/integrations/providers/youtube.mdx b/langchain_md_files/integrations/providers/youtube.mdx
deleted file mode 100644
index 8f3d69b819b5567bd4a3972ccb2b750170d16fa2..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/youtube.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# YouTube
-
->[YouTube](https://www.youtube.com/) is an online video sharing and social media platform by Google.
-> We download the `YouTube` transcripts and video information.
-
-## Installation and Setup
-
-```bash
-pip install youtube-transcript-api
-pip install pytube
-```
-
-
-## Document Loader
-
-See a [usage example](/docs/integrations/document_loaders/youtube_transcript).
-
-```python
-from langchain_community.document_loaders import YoutubeLoader
-from langchain_community.document_loaders import GoogleApiYoutubeLoader
-```
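-
-A minimal sketch of loading a transcript (the video URL is a placeholder):
-
-```python
-from langchain_community.document_loaders import YoutubeLoader
-
-loader = YoutubeLoader.from_youtube_url(
-    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",  # placeholder video URL
-    add_video_info=True,                            # uses pytube for video metadata
-)
-docs = loader.load()
-```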
diff --git a/langchain_md_files/integrations/providers/zep.mdx b/langchain_md_files/integrations/providers/zep.mdx
deleted file mode 100644
index 343bfd83a95866d6acd4e3aa37a59200daa404df..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/zep.mdx
+++ /dev/null
@@ -1,120 +0,0 @@
-# Zep
-> Recall, understand, and extract data from chat histories. Power personalized AI experiences.
-
->[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.
-> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,
-> while also reducing hallucinations, latency, and cost.
-
-## How Zep works
-
-Zep persists and recalls chat histories, and automatically generates summaries and other artifacts from these chat histories.
-It also embeds messages and summaries, enabling you to search Zep for relevant context from past conversations.
-Zep does all of this asynchronously, ensuring these operations don't impact your user's chat experience.
-Data is persisted to a database, allowing you to scale out when growth demands.
-
-Zep also provides a simple, easy to use abstraction for document vector search called Document Collections.
-This is designed to complement Zep's core memory features, but is not designed to be a general purpose vector database.
-
-Zep allows you to be more intentional about constructing your prompt:
-- automatically adding a few recent messages, with the number customized for your app;
-- a summary of recent conversations prior to the messages above;
-- and/or contextually relevant summaries or messages surfaced from the entire chat session;
-- and/or relevant business data from Zep Document Collections.
-
-## What is Zep Cloud?
-[Zep Cloud](https://www.getzep.com) is a managed service with Zep Open Source at its core.
-In addition to Zep Open Source's memory management features, Zep Cloud offers:
-- **Fact Extraction**: Automatically build fact tables from conversations, without having to define a data schema upfront.
-- **Dialog Classification**: Instantly and accurately classify chat dialog. Understand user intent and emotion, segment users, and more. Route chains based on semantic context, and trigger events.
-- **Structured Data Extraction**: Quickly extract business data from chat conversations using a schema you define. Understand what your Assistant should ask for next in order to complete its task.
-
-
-
-## Zep Open Source
-Zep offers an open source version with a self-hosted option.
-Please refer to the [Zep Open Source](https://github.com/getzep/zep) repo for more information.
-You can also find Zep Open Source compatible [Retriever](/docs/integrations/retrievers/zep_memorystore), [Vector Store](/docs/integrations/vectorstores/zep), and [Memory](/docs/integrations/memory/zep_memory) examples.
-
-## Zep Cloud Installation and Setup
-
-[Zep Cloud Docs](https://help.getzep.com)
-
-1. Install the Zep Cloud SDK:
-
-```bash
-pip install zep_cloud
-```
-or
-```bash
-poetry add zep_cloud
-```
-
-## Memory
-
-Zep's Memory API persists your users' chat history and metadata to a [Session](https://help.getzep.com/chat-history-memory/sessions), enriches the memory, and
-enables vector similarity search over historical chat messages and dialog summaries.
-
-Zep offers several approaches to populating prompts with context from historical conversations.
-
-### Perpetual Memory
-This is the default memory type.
-Salient facts from the dialog are extracted and stored in a Fact Table.
-This is updated in real-time as new messages are added to the Session.
-Every time you call the Memory API to get a Memory, Zep returns the Fact Table, the most recent messages (per your Message Window setting), and a summary of the most recent messages prior to the Message Window.
-The combination of the Fact Table, summary, and the most recent messages in a prompt provides both factual context and nuance to the LLM.
-
-### Summary Retriever Memory
-Returns the most recent messages and a summary of past messages relevant to the current conversation,
-enabling you to provide your Assistant with helpful context from past conversations.
-
-### Message Window Buffer Memory
-Returns the most recent N messages from the current conversation.
-
-Additionally, Zep enables vector similarity searches for Messages or Summaries stored within its system.
-
-This feature lets you populate prompts with past conversations that are contextually similar to a specific query,
-organizing the results by a similarity Score.
-
-The `ZepCloudChatMessageHistory` and `ZepCloudMemory` classes can be imported to interact with Zep Cloud APIs.
-
-`ZepCloudChatMessageHistory` is compatible with `RunnableWithMessageHistory`.
-```python
-from langchain_community.chat_message_histories import ZepCloudChatMessageHistory
-```
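-
-A minimal sketch of creating a chat message history (the session id and API key are placeholders):
-
-```python
-from langchain_community.chat_message_histories import ZepCloudChatMessageHistory
-
-history = ZepCloudChatMessageHistory(
-    session_id="user-123",              # identifies the chat session in Zep
-    api_key="your-zep-project-api-key",
-)
-history.add_user_message("Hello!")
-```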
-
-See a [Perpetual Memory Example here](/docs/integrations/memory/zep_cloud_chat_message_history).
-
-You can use `ZepCloudMemory` together with agents that support Memory.
-```python
-from langchain_community.memory import ZepCloudMemory
-```
-
-See a [Memory RAG Example here](/docs/integrations/memory/zep_memory_cloud).
-
-## Retriever
-
-Zep's Memory Retriever is a LangChain Retriever that enables you to retrieve messages from a Zep Session and use them to construct your prompt.
-
-The Retriever supports searching over both individual messages and summaries of conversations. The latter is useful for providing rich, but succinct context to the LLM as to relevant past conversations.
-
-Zep's Memory Retriever supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://help.getzep.com/working-with-search#how-zeps-mmr-re-ranking-works). MMR search is useful for ensuring that the retrieved messages are diverse and not too similar to each other.
-
-See a [usage example](/docs/integrations/retrievers/zep_cloud_memorystore).
-
-```python
-from langchain_community.retrievers import ZepCloudRetriever
-```
-
-## Vector store
-
-Zep's [Document VectorStore API](https://help.getzep.com/document-collections) enables you to store and retrieve documents using vector similarity search. Zep doesn't require you to understand
-distance functions, types of embeddings, or indexing best practices. You just pass in your chunked documents, and Zep handles the rest.
-
-Zep supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://help.getzep.com/working-with-search#how-zeps-mmr-re-ranking-works).
-MMR search is useful for ensuring that the retrieved documents are diverse and not too similar to each other.
-
-```python
-from langchain_community.vectorstores import ZepCloudVectorStore
-```
-
-See a [usage example](/docs/integrations/vectorstores/zep_cloud).
diff --git a/langchain_md_files/integrations/providers/zhipuai.mdx b/langchain_md_files/integrations/providers/zhipuai.mdx
deleted file mode 100644
index 0bcad6c4f4850be9fae6f9dd4da7ab181e359ebe..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/zhipuai.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# Zhipu AI
-
->[Zhipu AI](https://www.zhipuai.cn/en/aboutus), originating from the technological 
-> advancements of `Tsinghua University's Computer Science Department`, 
-> is an artificial intelligence company with the mission of teaching machines 
-> to think like humans. Its world-leading AI team has developed cutting-edge 
-> large language and multimodal models and built high-precision, billion-scale 
-> knowledge graphs, the combination of which uniquely empowers it to create a powerful 
-> data- and knowledge-driven cognitive engine towards artificial general intelligence.
-
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/zhipuai).
-
-```python
-from langchain_community.chat_models import ChatZhipuAI
-```
diff --git a/langchain_md_files/integrations/providers/zilliz.mdx b/langchain_md_files/integrations/providers/zilliz.mdx
deleted file mode 100644
index 6170afd351e08a17b40db0cff1c7431c896dbaff..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/providers/zilliz.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
-# Zilliz
-
->[Zilliz Cloud](https://zilliz.com/doc/quick_start) is a fully managed cloud service for `LF AI Milvus®`.
-
-
-## Installation and Setup
-
-Install the Python SDK:
-```bash
-pip install pymilvus
-```
-
-## Vectorstore
-
-A wrapper around Zilliz indexes allows you to use it as a vectorstore,
-whether for semantic search or example selection.
-
-```python
-from langchain_community.vectorstores import Milvus
-```
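-
-A minimal sketch of connecting to Zilliz Cloud (the URI and token are placeholders; `docs` and `embeddings` are assumed to already exist):
-
-```python
-from langchain_community.vectorstores import Milvus
-
-vector_store = Milvus.from_documents(
-    docs,
-    embeddings,
-    connection_args={
-        "uri": "https://<your-cluster>.zillizcloud.com",  # Zilliz Cloud endpoint
-        "token": "<your-zilliz-api-key>",
-    },
-)
-```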
-
-For a more detailed walkthrough of the Milvus wrapper, see [this notebook](/docs/integrations/vectorstores/zilliz).
diff --git a/langchain_md_files/integrations/retrievers/graph_rag.mdx b/langchain_md_files/integrations/retrievers/graph_rag.mdx
deleted file mode 100644
index ca70b00852d020cf7ccad205dec52319638761f9..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/retrievers/graph_rag.mdx
+++ /dev/null
@@ -1,379 +0,0 @@
----
-sidebar_label: Graph RAG
-description: Graph traversal over any Vector Store using document metadata.
----
-
-import ChatModelTabs from "@theme/ChatModelTabs";
-import EmbeddingTabs from "@theme/EmbeddingTabs";
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-
-# Graph RAG
-
-This guide provides an introduction to Graph RAG. For detailed documentation of all
-supported features and configurations, refer to the
-[Graph RAG Project Page](https://datastax.github.io/graph-rag/).
-
-## Overview
-
-The `GraphRetriever` from the `langchain-graph-retriever` package provides a LangChain
-[retriever](/docs/concepts/retrievers/) that combines **unstructured** similarity search
-on vectors with **structured** traversal of metadata properties. This enables graph-based
-retrieval over an **existing** vector store.
-
-### Integration details
-
-| Retriever | Source | PyPI Package | Latest | Project Page |
-| :--- | :--- | :---: | :---: | :---: |
-| GraphRetriever | [github.com/datastax/graph-rag](https://github.com/datastax/graph-rag/tree/main/packages/langchain-graph-retriever) | [langchain-graph-retriever](https://pypi.org/project/langchain-graph-retriever/) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-graph-retriever?style=flat-square&label=%20&color=orange) | [Graph RAG](https://datastax.github.io/graph-rag/) |
-
-
-## Benefits
-
-* [**Link based on existing metadata:**](https://datastax.github.io/graph-rag/get-started/)
-  Use existing metadata fields without additional processing. Retrieve more from an
-  existing vector store!
-
-* [**Change links on demand:**](https://datastax.github.io/graph-rag/get-started/edges/)
-  Edges can be specified on-the-fly, allowing different relationships to be traversed
-  based on the question.
-
-
-* [**Pluggable Traversal Strategies:**](https://datastax.github.io/graph-rag/get-started/strategies/)
-  Use built-in traversal strategies like Eager or MMR, or define custom logic to select
-  which nodes to explore.
-
-* [**Broad compatibility:**](https://datastax.github.io/graph-rag/get-started/adapters/)
-  Adapters are available for a variety of vector stores with support for additional
-  stores easily added.
-
-## Setup
-
-### Installation
-
-This retriever lives in the `langchain-graph-retriever` package.
-
-```bash
-pip install -qU langchain-graph-retriever
-```
-## Instantiation
-
-The following examples will show how to perform graph traversal over some sample
-Documents about animals.
-
-### Prerequisites
-
-<details>
-  <summary>Toggle for Details</summary>
-  <div>
-    1. Ensure you have Python 3.10+ installed
-
-    1. Install the following package that provides sample data.
-        ```bash
-        pip install -qU graph_rag_example_helpers
-        ```
-
-    1. Download the test documents:
-        ```python
-        from graph_rag_example_helpers.datasets.animals import fetch_documents
-        animals = fetch_documents()
-        ```
-
-    1. <EmbeddingTabs/>
-  </div>
-</details>
-
-### Populating the Vector store
-
-This section shows how to populate a variety of vector stores with the sample data.
-
-For help on choosing one of the vector stores below, or to add support for your
-vector store, consult the documentation about
-[Adapters and Supported Stores](https://datastax.github.io/graph-rag/guide/adapters/).
-
-<Tabs groupId="vector-store" queryString>
-  <TabItem value="astra-db" label="AstraDB" default>
-    <div style={{ paddingLeft: '30px' }}>
-      Install the `langchain-graph-retriever` package with the `astra` extra:
-
-      ```bash
-      pip install "langchain-graph-retriever[astra]"
-      ```
-
-      Then create a vector store and load the test documents:
-
-      ```python
-      from langchain_astradb import AstraDBVectorStore
-
-      vector_store = AstraDBVectorStore.from_documents(
-          documents=animals,
-          embedding=embeddings,
-          collection_name="animals",
-          api_endpoint=ASTRA_DB_API_ENDPOINT,
-          token=ASTRA_DB_APPLICATION_TOKEN,
-      )
-      ```
-      For the `ASTRA_DB_API_ENDPOINT` and `ASTRA_DB_APPLICATION_TOKEN` credentials,
-      consult the [AstraDB Vector Store Guide](/docs/integrations/vectorstores/astradb).
-
-      :::note
-      For faster initial testing, consider using the **InMemory** Vector Store.
-      :::
-    </div>
-  </TabItem>
-  <TabItem value="cassandra" label="Apache Cassandra">
-    <div style={{ paddingLeft: '30px' }}>
-      Install the `langchain-graph-retriever` package with the `cassandra` extra:
-
-      ```bash
-      pip install "langchain-graph-retriever[cassandra]"
-      ```
-
-      Then create a vector store and load the test documents:
-
-      ```python
-      from langchain_community.vectorstores.cassandra import Cassandra
-      from langchain_graph_retriever.transformers import ShreddingTransformer
-
-      vector_store = Cassandra.from_documents(
-          documents=list(ShreddingTransformer().transform_documents(animals)),
-          embedding=embeddings,
-          table_name="animals",
-      )
-      ```
-
-      For help creating a Cassandra connection, consult the
-      [Apache Cassandra Vector Store Guide](/docs/integrations/vectorstores/cassandra#connection-parameters)
-
-      :::note
-      Apache Cassandra doesn't support searching in nested metadata. Because of this
-      it is necessary to use the [`ShreddingTransformer`](https://datastax.github.io/graph-rag/reference/langchain_graph_retriever/transformers/#langchain_graph_retriever.transformers.shredding.ShreddingTransformer)
-      when inserting documents.
-      :::
-    </div>
-  </TabItem>
-  <TabItem value="opensearch" label="OpenSearch">
-    <div style={{ paddingLeft: '30px' }}>
-      Install the `langchain-graph-retriever` package with the `opensearch` extra:
-
-      ```bash
-      pip install "langchain-graph-retriever[opensearch]"
-      ```
-
-      Then create a vector store and load the test documents:
-
-      ```python
-      from langchain_community.vectorstores import OpenSearchVectorSearch
-
-      vector_store = OpenSearchVectorSearch.from_documents(
-          documents=animals,
-          embedding=embeddings,
-          engine="faiss",
-          index_name="animals",
-          opensearch_url=OPEN_SEARCH_URL,
-          bulk_size=500,
-      )
-      ```
-
-      For help creating an OpenSearch connection, consult the
-      [OpenSearch Vector Store Guide](/docs/integrations/vectorstores/opensearch).
-    </div>
-  </TabItem>
-  <TabItem value="chroma" label="Chroma">
-    <div style={{ paddingLeft: '30px' }}>
-      Install the `langchain-graph-retriever` package with the `chroma` extra:
-
-      ```bash
-      pip install "langchain-graph-retriever[chroma]"
-      ```
-
-      Then create a vector store and load the test documents:
-
-      ```python
-      from langchain_chroma.vectorstores import Chroma
-      from langchain_graph_retriever.transformers import ShreddingTransformer
-
-      vector_store = Chroma.from_documents(
-          documents=list(ShreddingTransformer().transform_documents(animals)),
-          embedding=embeddings,
-          collection_name="animals",
-      )
-      ```
-
-      For help creating a Chroma connection, consult the
-      [Chroma Vector Store Guide](/docs/integrations/vectorstores/chroma).
-
-      :::note
-      Chroma doesn't support searching in nested metadata. Because of this
-      it is necessary to use the [`ShreddingTransformer`](https://datastax.github.io/graph-rag/reference/langchain_graph_retriever/transformers/#langchain_graph_retriever.transformers.shredding.ShreddingTransformer)
-      when inserting documents.
-      :::
-    </div>
-  </TabItem>
-  <TabItem value="in-memory" label="InMemory" default>
-    <div style={{ paddingLeft: '30px' }}>
-      Install the `langchain-graph-retriever` package:
-
-      ```bash
-      pip install "langchain-graph-retriever"
-      ```
-
-      Then create a vector store and load the test documents:
-
-      ```python
-      from langchain_core.vectorstores import InMemoryVectorStore
-
-      vector_store = InMemoryVectorStore.from_documents(
-          documents=animals,
-          embedding=embeddings,
-      )
-      ```
-
-      :::tip
-      Using the `InMemoryVectorStore` is the fastest way to get started with Graph RAG
-      but it isn't recommended for production use. Instead it is recommended to use
-      **AstraDB** or **OpenSearch**.
-      :::
-    </div>
-  </TabItem>
-</Tabs>
-
-### Graph Traversal
-
-This graph retriever starts with a single animal that best matches the query, then
-traverses to other animals sharing the same `habitat` and/or `origin`.
-
-  ```python
-  from graph_retriever.strategies import Eager
-  from langchain_graph_retriever import GraphRetriever
-
-  traversal_retriever = GraphRetriever(
-      store = vector_store,
-      edges = [("habitat", "habitat"), ("origin", "origin")],
-      strategy = Eager(k=5, start_k=1, max_depth=2),
-  )
-  ```
-
-The above creates a graph traversing retriever that starts with the nearest
-animal (`start_k=1`), retrieves 5 documents (`k=5`) and limits the search to documents
-that are at most 2 steps away from the first animal (`max_depth=2`).
-
-The `edges` define how metadata values can be used for traversal. In this case, every
-animal is connected to other animals with the same `habitat` and/or `origin`.
-
-```python
-results = traversal_retriever.invoke("what animals could be found near a capybara?")
-
-for doc in results:
-    print(f"{doc.id}: {doc.page_content}")
-```
-
-```output
-capybara: capybaras are the largest rodents in the world and are highly social animals.
-heron: herons are wading birds known for their long legs and necks, often seen near water.
-crocodile: crocodiles are large reptiles with powerful jaws and a long lifespan, often living over 70 years.
-frog: frogs are amphibians known for their jumping ability and croaking sounds.
-duck: ducks are waterfowl birds known for their webbed feet and quacking sounds.
-```
-
-Graph traversal improves retrieval quality by leveraging structured relationships in
-the data. Unlike standard similarity search (see below), it provides a clear,
-explainable rationale for why documents are selected.
-
-In this case, the documents `capybara`, `heron`, `crocodile`, `frog`, and `duck` all
-share the same `habitat=wetlands`, as defined by their metadata. This should increase
-Document Relevance and the quality of the answer from the LLM.
-
-### Comparison to Standard Retrieval
-
-When `max_depth=0`, the graph traversing retriever behaves like a standard retriever:
-
-```python
-standard_retriever = GraphRetriever(
-    store = vector_store,
-    edges = [("habitat", "habitat"), ("origin", "origin")],
-    strategy = Eager(k=5, start_k=5, max_depth=0),
-)
-```
-
-This creates a retriever that starts with the nearest 5 animals (`start_k=5`),
-and returns them without any traversal (`max_depth=0`). The edge definitions
-are ignored in this case.
-
-This is essentially the same as:
-
-```python
-standard_retriever = vector_store.as_retriever(search_kwargs={"k":5})
-```
-
-For either case, invoking the retriever returns:
-
-```python
-results = standard_retriever.invoke("what animals could be found near a capybara?")
-
-for doc in results:
-    print(f"{doc.id}: {doc.page_content}")
-```
-
-```output
-capybara: capybaras are the largest rodents in the world and are highly social animals.
-iguana: iguanas are large herbivorous lizards often found basking in trees and near water.
-guinea pig: guinea pigs are small rodents often kept as pets due to their gentle and social nature.
-hippopotamus: hippopotamuses are large semi-aquatic mammals known for their massive size and territorial behavior.
-boar: boars are wild relatives of pigs, known for their tough hides and tusks.
-```
-
-These documents are joined based on similarity alone. Any structural data that existed
-in the store is ignored. As compared to graph retrieval, this can decrease Document
-Relevance because the returned results have a lower chance of being helpful to answer
-the query.
-
-## Usage
-
-Following the examples above, `.invoke` is used to initiate retrieval on a query.
-
-## Use within a chain
-
-Like other retrievers, `GraphRetriever` can be incorporated into LLM applications
-via [chains](/docs/how_to/sequence/).
-
-<ChatModelTabs customVarName="llm" />
-
-```python
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.runnables import RunnablePassthrough
-
-prompt = ChatPromptTemplate.from_template(
-"""Answer the question based only on the context provided.
-
-Context: {context}
-
-Question: {question}"""
-)
-
-def format_docs(docs):
-    return "\n\n".join(f"text: {doc.page_content} metadata: {doc.metadata}" for doc in docs)
-
-chain = (
-    {"context": traversal_retriever | format_docs, "question": RunnablePassthrough()}
-    | prompt
-    | llm
-    | StrOutputParser()
-)
-```
-
-```python
-chain.invoke("what animals could be found near a capybara?")
-```
-
-```output
-Animals that could be found near a capybara include herons, crocodiles, frogs,
-and ducks, as they all inhabit wetlands.
-```
-
-## API reference
-
-To explore all available parameters and advanced configurations, refer to the
-[Graph RAG API reference](https://datastax.github.io/graph-rag/reference/).
diff --git a/langchain_md_files/integrations/retrievers/index.mdx b/langchain_md_files/integrations/retrievers/index.mdx
deleted file mode 100644
index 4bbc458c563fae01a0fab170f3c315f7f3f81f8a..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/retrievers/index.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-import {CategoryTable, IndexTable} from '@theme/FeatureTables'
-
-# Retrievers
-
-A [retriever](/docs/concepts/retrievers) is an interface that returns documents given an unstructured query.
-It is more general than a vector store.
-A retriever does not need to be able to store documents, only to return (or retrieve) them.
-Retrievers can be created from vector stores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).
-
-Retrievers accept a string query as input and return a list of [Documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) as output.
-
-For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
-
-Note that all [vector stores](/docs/concepts/vectorstores) can be [cast to retrievers](/docs/how_to/vectorstore_retriever/).
-Refer to the vector store [integration docs](/docs/integrations/vectorstores/) for available vector stores.
-This page lists custom retrievers, implemented via subclassing [BaseRetriever](/docs/how_to/custom_retriever/).
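-
-As noted above, any vector store can be cast to a retriever. A minimal sketch, assuming an
-existing `vector_store` object from one of the vector store integrations:
-
-```python
-# Minimal sketch: cast an existing vector store to a retriever.
-# Assumes `vector_store` was created earlier and already contains documents.
-retriever = vector_store.as_retriever(search_kwargs={"k": 3})
-
-# Retrievers take a string query and return a list of Documents.
-for doc in retriever.invoke("what is a retriever?"):
-    print(doc.page_content)
-```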
-
-## Bring-your-own documents
-
-The below retrievers allow you to index and search a custom corpus of documents.
-
-<CategoryTable category="document_retrievers" />
-
-## External index
-
-The below retrievers will search over an external index (e.g., constructed from Internet data or similar).
-
-<CategoryTable category="external_retrievers" />
-
-## All retrievers
-
-<IndexTable />
diff --git a/langchain_md_files/integrations/retrievers/self_query/index.mdx b/langchain_md_files/integrations/retrievers/self_query/index.mdx
deleted file mode 100644
index fba56cb6145c4d60e0e4bef3f1bd637a45407d58..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/retrievers/self_query/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-sidebar_position: 0
----
-
-# Self-querying retrievers
-
-Learn how the self-querying retriever works [here](/docs/how_to/self_query).
-
-import DocCardList from "@theme/DocCardList";
-
-<DocCardList />
diff --git a/langchain_md_files/integrations/text_embedding/index.mdx b/langchain_md_files/integrations/text_embedding/index.mdx
deleted file mode 100644
index 169480565122bfa84beca2d841b588f513b8b660..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/text_embedding/index.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-# Embedding models
-
-import { CategoryTable, IndexTable } from "@theme/FeatureTables";
-
-[Embedding models](/docs/concepts/embedding_models) create a vector representation of a piece of text.
-
-This page documents integrations with various model providers that allow you to use embeddings in LangChain.
-
-import EmbeddingTabs from "@theme/EmbeddingTabs";
-
-<EmbeddingTabs/>
-
-```python
-embeddings.embed_query("Hello, world!")
-```
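-
-The same interface can embed a batch of texts in one call via `embed_documents`; a minimal sketch:
-
-```python
-# Returns one vector (a list of floats) per input text.
-vectors = embeddings.embed_documents(["Hello, world!", "Goodbye, world!"])
-print(len(vectors), len(vectors[0]))
-```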
-
-<CategoryTable category="text_embedding" />
-
-## All embedding models
-
-<IndexTable />
diff --git a/langchain_md_files/integrations/vectorstores/index.mdx b/langchain_md_files/integrations/vectorstores/index.mdx
deleted file mode 100644
index c213d968ee3e2ff0e5a9e25a98f7d14a980b8b51..0000000000000000000000000000000000000000
--- a/langchain_md_files/integrations/vectorstores/index.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-# Vector stores
-
-import { CategoryTable, IndexTable } from "@theme/FeatureTables";
-
-A [vector store](/docs/concepts/vectorstores) stores [embedded](/docs/concepts/embedding_models) data and performs similarity search.
-
-**Select embedding model:**
-
-import EmbeddingTabs from "@theme/EmbeddingTabs";
-
-<EmbeddingTabs/>
-
-**Select vector store:**
-
-import VectorStoreTabs from "@theme/VectorStoreTabs";
-
-<VectorStoreTabs/>
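-
-Once a vector store is instantiated, the typical flow is to add documents and then search them by
-similarity. A minimal sketch, assuming a `vector_store` object created from the tabs above:
-
-```python
-from langchain_core.documents import Document
-
-# Index a couple of documents, then retrieve the closest match.
-vector_store.add_documents([
-    Document(page_content="LangChain provides abstractions for working with LLMs."),
-    Document(page_content="Vector stores index embedded text for similarity search."),
-])
-
-results = vector_store.similarity_search("how do I search embedded text?", k=1)
-print(results[0].page_content)
-```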
-
-<CategoryTable category="vectorstores" />
-
-## All Vectorstores
-
-<IndexTable />
-
diff --git a/langchain_md_files/introduction.mdx b/langchain_md_files/introduction.mdx
deleted file mode 100644
index 182c8e616019b46058bc3939bd123f7ce888fe04..0000000000000000000000000000000000000000
--- a/langchain_md_files/introduction.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-
-# Introduction
-
-**LangChain** is a framework for developing applications powered by large language models (LLMs).
-
-LangChain simplifies every stage of the LLM application lifecycle:
-- **Development**: Build your applications using LangChain's open-source [components](/docs/concepts) and [third-party integrations](/docs/integrations/providers/).
-Use [LangGraph](/docs/concepts/architecture/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
-- **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your applications, so that you can continuously optimize and deploy with confidence.
-- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Platform](https://langchain-ai.github.io/langgraph/cloud/).
-
-import ThemedImage from '@theme/ThemedImage';
-import useBaseUrl from '@docusaurus/useBaseUrl';
-
-<ThemedImage
-  alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
-  sources={{
-    light: useBaseUrl('/svg/langchain_stack_112024.svg'),
-    dark: useBaseUrl('/svg/langchain_stack_112024_dark.svg'),
-  }}
-  style={{ width: "100%" }}
-  title="LangChain Framework Overview"
-/>
-
-LangChain implements a standard interface for large language models and related
-technologies, such as embedding models and vector stores, and integrates with
-hundreds of providers. See the [integrations](/docs/integrations/providers/) page for
-more.
-
-import ChatModelTabs from "@theme/ChatModelTabs";
-
-<ChatModelTabs/>
-
-```python
-model.invoke("Hello, world!")
-```
-
-:::note
-
-These docs focus on the Python LangChain library. [Head here](https://js.langchain.com) for docs on the JavaScript LangChain library.
-
-:::
-
-## Architecture
-
-The LangChain framework consists of multiple open-source libraries. Read more in the
-[Architecture](/docs/concepts/architecture/) page.
-
-- **`langchain-core`**: Base abstractions for chat models and other components.
-- **Integration packages** (e.g. `langchain-openai`, `langchain-anthropic`, etc.): Important integrations have been split into lightweight packages that are co-maintained by the LangChain team and the integration developers.
-- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **`langchain-community`**: Third-party integrations that are community maintained.
-- **`langgraph`**: Orchestration framework for combining LangChain components into production-ready applications with persistence, streaming, and other key features. See [LangGraph documentation](https://langchain-ai.github.io/langgraph/).
-
-## Guides
-
-### [Tutorials](/docs/tutorials)
-
-If you're looking to build something specific or are more of a hands-on learner, check out our [tutorials section](/docs/tutorials).
-This is the best place to get started.
-
-These are the best ones to get started with:
-
-- [Build a Simple LLM Application](/docs/tutorials/llm_chain)
-- [Build a Chatbot](/docs/tutorials/chatbot)
-- [Build an Agent](/docs/tutorials/agents)
-- [Introduction to LangGraph](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
-
-Explore the full list of LangChain tutorials [here](/docs/tutorials), and check out other [LangGraph tutorials here](https://langchain-ai.github.io/langgraph/tutorials/). To learn more about LangGraph, check out our first LangChain Academy course, *Introduction to LangGraph*, available [here](https://academy.langchain.com/courses/intro-to-langgraph).
-
-
-### [How-to guides](/docs/how_to)
-
-[Here](/docs/how_to) you’ll find short answers to “How do I….?” types of questions.
-These how-to guides don’t cover topics in depth – you’ll find that material in the [Tutorials](/docs/tutorials) and the [API Reference](https://python.langchain.com/api_reference/).
-However, these guides will help you quickly accomplish common tasks using [chat models](/docs/how_to/#chat-models),
-[vector stores](/docs/how_to/#vector-stores), and other common LangChain components.
-
-Check out [LangGraph-specific how-tos here](https://langchain-ai.github.io/langgraph/how-tos/).
-
-### [Conceptual guide](/docs/concepts)
-
-Introductions to all the key parts of LangChain you’ll need to know! [Here](/docs/concepts) you'll find high level explanations of all LangChain concepts.
-
-For a deeper dive into LangGraph concepts, check out [this page](https://langchain-ai.github.io/langgraph/concepts/).
-
-### [Integrations](integrations/providers/index.mdx)
-
-LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it.
-If you're looking to get up and running quickly with [chat models](/docs/integrations/chat/), [vector stores](/docs/integrations/vectorstores/),
-or other LangChain components from a specific provider, check out our growing list of [integrations](/docs/integrations/providers/).
-
-
-### [API reference](https://python.langchain.com/api_reference/)
-Head to the reference section for full documentation of all classes and methods in the LangChain Python packages.
-
-## Ecosystem
-
-### [🦜🛠️ LangSmith](https://docs.smith.langchain.com)
-Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
-
-### [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph)
-Build stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it. LangGraph powers production-grade agents, trusted by LinkedIn, Uber, Klarna, GitLab, and many more.
-
-## Additional resources
-
-### [Versions](/docs/versions/v0_3/)
-See what changed in v0.3, learn how to migrate legacy code, read up on our versioning policies, and more.
-
-### [Security](/docs/security)
-Read up on [security](/docs/security) best practices to make sure you're developing safely with LangChain.
-
-### [Contributing](contributing/index.mdx)
-Check out the developer's guide for guidelines on contributing and help getting your dev environment set up.
diff --git a/langchain_md_files/people.mdx b/langchain_md_files/people.mdx
deleted file mode 100644
index 2426dab3a5f2af55dd524a646b9b8d57d9eaf19b..0000000000000000000000000000000000000000
--- a/langchain_md_files/people.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
----
-hide_table_of_contents: true
----
-
-import People from "@theme/People";
-
-# People
-
-There are some incredible humans from all over the world who have been instrumental in helping the LangChain community flourish 🌐!
-
-This page highlights a few of those folks who have dedicated their time to the open-source repo in the form of direct contributions and reviews.
-
-## Top reviewers
-
-As LangChain has grown, the amount of surface area that maintainers cover has grown as well.
-
-Thank you to the following folks who have gone above and beyond in reviewing incoming PRs 🙏!
-
-<People type="top_reviewers"></People>
-
-## Top recent contributors
-
-The list below contains contributors who have had the most PRs merged in the last three months, weighted (imperfectly) by impact.
-
-Thank you all so much for your time and efforts in making LangChain better ❤️!
-
-<People type="top_recent_contributors" count="20"></People>
-
-## Core maintainers
-
-Hello there 👋!
-
-We're LangChain's core maintainers. If you've spent time in the community, you've probably crossed paths
-with at least one of us already. 
-
-<People type="maintainers"></People>
-
-## Top all-time contributors
-
-And finally, this is an all-time list of all-stars who have made significant contributions to the framework 🌟:
-
-<People type="top_contributors"></People>
-
-We're so thankful for your support!
-
-And one more thank you to [@tiangolo](https://github.com/tiangolo) for inspiration via FastAPI's [excellent people page](https://fastapi.tiangolo.com/fastapi-people).
diff --git a/langchain_md_files/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx b/langchain_md_files/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx
deleted file mode 100644
index 030881a19c0d0f36bff5463e157525835f6e1414..0000000000000000000000000000000000000000
--- a/langchain_md_files/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
-# INVALID_PROMPT_INPUT
-
-A [prompt template](/docs/concepts/prompt_templates) received missing or invalid input variables.
-
-## Troubleshooting
-
-The following may help resolve this error:
-
-- Double-check your prompt template to ensure that it is correct.
-  - If you are using the default f-string format and you want literal curly braces `{` anywhere in your template, they should be double-escaped like this: `{{` (and to render a literal double curly brace, use four curly braces: `{{{{`). See the sketch at the end of this list.
-- If you are using a [`MessagesPlaceholder`](/docs/concepts/prompt_templates/#messagesplaceholder), make sure that you are passing in an array of messages or message-like objects.
-  - If you are using shorthand tuples to declare your prompt template, make sure that the variable name is wrapped in curly braces (`["placeholder", "{messages}"]`).
-- Try viewing the inputs into your prompt template using [LangSmith](https://docs.smith.langchain.com/) or log statements to confirm they appear as expected.
-- If you are pulling a prompt from the [LangChain Prompt Hub](https://smith.langchain.com/prompts), try pulling and logging it or running it in isolation with a sample input to confirm that it is what you expect.
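-
-A minimal sketch of the double-escaping rule mentioned above, using an f-string prompt template:
-
-```python
-from langchain_core.prompts import ChatPromptTemplate
-
-# `{question}` is a real input variable; the JSON braces are doubled so the
-# default f-string formatter treats them as literal characters.
-prompt = ChatPromptTemplate.from_template(
-    'Answer the question: {question}\nRespond as JSON, e.g. {{"answer": "..."}}'
-)
-
-print(prompt.invoke({"question": "What is 2 + 2?"}).to_string())
-```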
diff --git a/langchain_md_files/troubleshooting/errors/MODEL_AUTHENTICATION.mdx b/langchain_md_files/troubleshooting/errors/MODEL_AUTHENTICATION.mdx
deleted file mode 100644
index e3d58b5e6e79bcb68ba5a37fab08442f6878e687..0000000000000000000000000000000000000000
--- a/langchain_md_files/troubleshooting/errors/MODEL_AUTHENTICATION.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
-# MODEL_AUTHENTICATION
-
-Your model provider is denying you access to their service.
-
-## Troubleshooting
-
-The following may help resolve this error:
-
-- Confirm that your API key or other credentials are correct.
-- If you are relying on an environment variable to authenticate, confirm that the variable name is correct and that it has a value set.
-  - Note that environment variables can also be set by packages like `dotenv`.
-  - For models, you can try explicitly passing an `api_key` parameter to rule out any environment variable issues like this:
-
-```python
-from langchain_openai import ChatOpenAI  # or the equivalent class for your provider
-
-model = ChatOpenAI(api_key="YOUR_KEY_HERE")
-```
-
-- If you are using a proxy or other custom endpoint, make sure that your custom provider does not expect an alternative authentication scheme.
diff --git a/langchain_md_files/troubleshooting/errors/MODEL_NOT_FOUND.mdx b/langchain_md_files/troubleshooting/errors/MODEL_NOT_FOUND.mdx
deleted file mode 100644
index 32b8d6896a6a26639a3884c8532fbb5e94eb4d1c..0000000000000000000000000000000000000000
--- a/langchain_md_files/troubleshooting/errors/MODEL_NOT_FOUND.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-# MODEL_NOT_FOUND
-
-The model name you have specified is not recognized by your provider.
-
-## Troubleshooting
-
-The following may help resolve this error:
-
-- Double-check the model string you are passing in.
-- If you are using a proxy or other alternative host with a model wrapper, confirm that the permitted model names are not restricted or altered.
diff --git a/langchain_md_files/troubleshooting/errors/MODEL_RATE_LIMIT.mdx b/langchain_md_files/troubleshooting/errors/MODEL_RATE_LIMIT.mdx
deleted file mode 100644
index d589913479e1fc320aeccece96fac641a6076939..0000000000000000000000000000000000000000
--- a/langchain_md_files/troubleshooting/errors/MODEL_RATE_LIMIT.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
-# MODEL_RATE_LIMIT
-
-You have hit the maximum number of requests that a model provider allows over a given time period and are being temporarily blocked.
-Generally, this error is temporary and your limit will reset after a certain amount of time.
-
-## Troubleshooting
-
-The following may help resolve this error:
-
-- Contact your model provider and ask for a rate limit increase.
-- If many of your incoming requests are the same, utilize [model response caching](/docs/how_to/chat_model_caching/).
-- Spread requests across different providers if your application allows it.
-- Use a [`rate_limiter`](/docs/how_to/chat_model_rate_limiting/) to control the rate of requests to the model (see the sketch below).
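-
-A minimal sketch of the rate-limiter approach. The model class and name here are illustrative; any
-chat model that accepts a `rate_limiter` parameter works the same way:
-
-```python
-from langchain_core.rate_limiters import InMemoryRateLimiter
-from langchain_openai import ChatOpenAI
-
-# Allow roughly one request every two seconds, with a small burst budget.
-rate_limiter = InMemoryRateLimiter(
-    requests_per_second=0.5,
-    check_every_n_seconds=0.1,
-    max_bucket_size=5,
-)
-
-model = ChatOpenAI(model="gpt-4o-mini", rate_limiter=rate_limiter)
-model.invoke("Hello, world!")
-```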
diff --git a/langchain_md_files/troubleshooting/errors/index.mdx b/langchain_md_files/troubleshooting/errors/index.mdx
deleted file mode 100644
index aad0308b9c49448444a5de1870ce083eb511bac2..0000000000000000000000000000000000000000
--- a/langchain_md_files/troubleshooting/errors/index.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
-# Error reference
-
-This page contains guides around resolving common errors you may find while building with LangChain.
-Errors referenced below will have an `lc_error_code` property corresponding to one of the codes listed here when they are thrown.
-
-- [INVALID_PROMPT_INPUT](/docs/troubleshooting/errors/INVALID_PROMPT_INPUT)
-- [INVALID_TOOL_RESULTS](/docs/troubleshooting/errors/INVALID_TOOL_RESULTS)
-- [MESSAGE_COERCION_FAILURE](/docs/troubleshooting/errors/MESSAGE_COERCION_FAILURE)
-- [MODEL_AUTHENTICATION](/docs/troubleshooting/errors/MODEL_AUTHENTICATION)
-- [MODEL_NOT_FOUND](/docs/troubleshooting/errors/MODEL_NOT_FOUND)
-- [MODEL_RATE_LIMIT](/docs/troubleshooting/errors/MODEL_RATE_LIMIT)
-- [OUTPUT_PARSING_FAILURE](/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE)
diff --git a/langchain_md_files/tutorials/index.mdx b/langchain_md_files/tutorials/index.mdx
deleted file mode 100644
index 25486c45e8fc42b2c0c6994360eacb7bbf713a9d..0000000000000000000000000000000000000000
--- a/langchain_md_files/tutorials/index.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-sidebar_position: 0
-sidebar_class_name: hidden
----
-# Tutorials
-
-New to LangChain or LLM app development in general? Read this material to quickly get up and running building your first applications.
-
-## Get started
-
-Familiarize yourself with LangChain's open-source components by building simple applications.
-
-If you're looking to get started with [chat models](/docs/integrations/chat/), [vector stores](/docs/integrations/vectorstores/),
-or other LangChain components from a specific provider, check out our supported [integrations](/docs/integrations/providers/).
-
-- [Chat models and prompts](/docs/tutorials/llm_chain): Build a simple LLM application with [prompt templates](/docs/concepts/prompt_templates) and [chat models](/docs/concepts/chat_models).
-- [Semantic search](/docs/tutorials/retrievers): Build a semantic search engine over a PDF with [document loaders](/docs/concepts/document_loaders), [embedding models](/docs/concepts/embedding_models/), and [vector stores](/docs/concepts/vectorstores/).
-- [Classification](/docs/tutorials/classification): Classify text into categories or labels using [chat models](/docs/concepts/chat_models) with [structured outputs](/docs/concepts/structured_outputs/).
-- [Extraction](/docs/tutorials/extraction): Extract structured data from text and other unstructured media using [chat models](/docs/concepts/chat_models) and [few-shot examples](/docs/concepts/few_shot_prompting/).
-
-Refer to the [how-to guides](/docs/how_to) for more detail on using all LangChain components.
-
-## Orchestration
-
-Get started using [LangGraph](https://langchain-ai.github.io/langgraph/) to assemble LangChain components into full-featured applications.
-
-- [Chatbots](/docs/tutorials/chatbot): Build a chatbot that incorporates memory.
-- [Agents](/docs/tutorials/agents): Build an agent that interacts with external tools.
-- [Retrieval Augmented Generation (RAG) Part 1](/docs/tutorials/rag): Build an application that uses your own documents to inform its responses.
-- [Retrieval Augmented Generation (RAG) Part 2](/docs/tutorials/qa_chat_history): Build a RAG application that incorporates a memory of its user interactions and multi-step retrieval.
-- [Question-Answering with SQL](/docs/tutorials/sql_qa): Build a question-answering system that executes SQL queries to inform its responses.
-- [Summarization](/docs/tutorials/summarization): Generate summaries of (potentially long) texts.
-- [Question-Answering with Graph Databases](/docs/tutorials/graph): Build a question-answering system that queries a graph database to inform its responses.
-
-## LangSmith
-
-LangSmith allows you to closely trace, monitor and evaluate your LLM application.
-It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.
-
-LangSmith documentation is hosted on a separate site.
-You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/tutorials/).
-
-### Evaluation
-
-LangSmith helps you evaluate the performance of your LLM applications. The tutorial below is a great way to get started:
-
-- [Evaluate your LLM application](https://docs.smith.langchain.com/tutorials/Developers/evaluation)
diff --git a/langchain_md_files/versions/migrating_memory/index.mdx b/langchain_md_files/versions/migrating_memory/index.mdx
deleted file mode 100644
index 0043dcb13b54c9675999c6126cbd8eccc6564649..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/migrating_memory/index.mdx
+++ /dev/null
@@ -1,152 +0,0 @@
----
-sidebar_position: 1
----
-
-# How to migrate to LangGraph memory
-
-As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into their LangChain application.
-
-* Users that rely on `RunnableWithMessageHistory` or `BaseChatMessageHistory` do **not** need to make any changes, but are encouraged to consider using LangGraph for more complex use cases.
-* Users that rely on deprecated memory abstractions from LangChain 0.0.x should follow this guide to upgrade to the new LangGraph persistence feature in LangChain 0.3.x.
-
-## Why use LangGraph for memory?
-
-The main advantages of persistence in LangGraph are:
-
-- Built-in support for multiple users and conversations, which is a typical requirement for real-world conversational AI applications.
-- Ability to save and resume complex conversations at any point. This helps with:
-  - Error recovery
-  - Allowing human intervention in AI workflows
-  - Exploring different conversation paths ("time travel")
-- Full compatibility with both traditional [language models](/docs/concepts/text_llms) and modern [chat models](/docs/concepts/chat_models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state.
-- Highly customizable, allowing you to fully control how memory works and use different storage backends.
-
-## Evolution of memory in LangChain
-
-The concept of memory has evolved significantly in LangChain since its initial release.
-
-### LangChain 0.0.x memory
-
-Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases:
-
-| Use Case                             | Example                                                                                                                           |
-|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
-| Managing conversation history        | Keep only the last `n` turns of the conversation between the user and the AI.                                                     |
-| Extraction of structured information | Extract structured information from the conversation history, such as a list of facts learned about the user.                     |
-| Composite memory implementations     | Combine multiple memory sources, e.g., a list of known facts about the user along with facts learned during a given conversation. |
-
-While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems.
-
-Most of these implementations have been officially deprecated in LangChain 0.3.x in favor of LangGraph persistence.
-
-### RunnableWithMessageHistory and BaseChatMessageHistory
-
-:::note
-Please see [How to use BaseChatMessageHistory with LangGraph](./chat_history), if you would like to use `BaseChatMessageHistory` (with or without `RunnableWithMessageHistory`) in LangGraph.
-:::
-
-As of LangChain v0.1, we started recommending that users rely primarily on [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). `BaseChatMessageHistory` serves
-as a simple persistence layer for storing and retrieving messages in a conversation.
-
-At that time, the only option for orchestrating LangChain chains was via [LCEL](https://python.langchain.com/docs/how_to/#langchain-expression-language-lcel). To incorporate memory with `LCEL`, users had to use the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) interface. While sufficient for basic chat applications, many users found the API unintuitive and challenging to use.
-
-As of LangChain v0.3, we recommend that **new** code takes advantage of LangGraph for both orchestration and persistence:
-
-- Orchestration: In LangGraph, users define [graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/) that specify the flow of the application. This allows users to keep using `LCEL` within individual nodes when `LCEL` is needed, while making it easy to define complex orchestration logic that is more readable and maintainable.
-- Persistence: Users can rely on LangGraph's [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to store and retrieve data. LangGraph persistence is extremely flexible and can support a much wider range of use cases than the `RunnableWithMessageHistory` interface (see the sketch below).
-
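-As a minimal sketch of this pattern, a single-node graph compiled with an in-memory checkpointer
-gives each `thread_id` its own persisted conversation. The chat model `llm` is assumed to be
-instantiated elsewhere:
-
-```python
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import START, MessagesState, StateGraph
-
-def call_model(state: MessagesState):
-    # `llm` is any LangChain chat model created elsewhere.
-    return {"messages": [llm.invoke(state["messages"])]}
-
-builder = StateGraph(MessagesState)
-builder.add_node("model", call_model)
-builder.add_edge(START, "model")
-
-# MemorySaver persists state per thread; swap in a database-backed
-# checkpointer for production use.
-graph = builder.compile(checkpointer=MemorySaver())
-
-config = {"configurable": {"thread_id": "conversation-1"}}
-graph.invoke({"messages": [("user", "hi, I'm Bob")]}, config)
-graph.invoke({"messages": [("user", "what's my name?")]}, config)
-```
-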
-:::important
-If you have been using `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do not need to make any changes. We do not plan on deprecating either functionality in the near future. This functionality is sufficient for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.
-:::
-
-## Migrations
-
-:::info Prerequisites
-
-These guides assume some familiarity with the following concepts:
-- [LangGraph](https://langchain-ai.github.io/langgraph/)
-- [v0.0.x Memory](https://python.langchain.com/v0.1/docs/modules/memory/)
-- [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
-:::
-
-### 1. Managing conversation history
-
-The goal of managing conversation history is to store and retrieve the history in a way that is optimal for a chat model to use.
-
-Often this involves trimming and/or summarizing the conversation history so that the most relevant parts of the conversation are kept while it still fits inside the context window of the chat model.
-
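-Trimming, for example, can be applied directly to a list of messages with the `trim_messages`
-utility. A minimal sketch that keeps the system message plus the most recent messages (here
-counting each message as one "token"):
-
-```python
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages
-
-history = [
-    SystemMessage("You are a helpful assistant."),
-    HumanMessage("Hi, I'm Bob."),
-    AIMessage("Hello Bob!"),
-    HumanMessage("What's my name?"),
-]
-
-# `token_counter=len` counts messages rather than real tokens; swap in a
-# tokenizer- or model-based counter for true token budgets.
-trimmed = trim_messages(
-    history,
-    max_tokens=3,
-    strategy="last",
-    token_counter=len,
-    include_system=True,
-)
-```
-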
-Memory classes that fall into this category include:
-
-| Memory Type                       | How to Migrate                                                                                                                                              | Description                                                                                                                                                                                                         |
-|-----------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `ConversationBufferMemory`        | [Link to Migration Guide](conversation_buffer_memory)                                                                                                       | A basic memory implementation that simply stores the conversation history.                                                                                                                                          |
-| `ConversationStringBufferMemory`  | [Link to Migration Guide](conversation_buffer_memory)                                                                                                       | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant.                                                                                                                              |
-| `ConversationBufferWindowMemory`  | [Link to Migration Guide](conversation_buffer_window_memory)                                                                                                | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full.                                                                                                                        |
-| `ConversationTokenBufferMemory`   | [Link to Migration Guide](conversation_buffer_window_memory)                                                                                                | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit.                                                   |
-| `ConversationSummaryMemory`       | [Link to Migration Guide](conversation_summary_memory)                                                                                                      | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history.                                              |
-| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory)                                                                                                      | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
-| `VectorStoreRetrieverMemory`      | See related [long-term memory agent tutorial](long_term_memory_agent) | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input.                                                                                    |
-
-
-### 2. Extraction of structured information from the conversation history
-
-Please see the [long-term memory agent tutorial](long_term_memory_agent), which implements an agent that can extract structured information from the conversation history.
-
-Memory classes that fall into this category include:
-
-| Memory Type                | Description                                                                                                                                                                                                       |
-|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `BaseEntityStore`          | An abstract interface that resembles a key-value store. It was used for storing structured information learned during the conversation. The information had to be represented as a dictionary of key-value pairs. |
-| `ConversationEntityMemory` | Combines the ability to summarize the conversation while extracting structured information from the conversation history.                                                                                         |
-
-And specific backend implementations of abstractions:
-
-| Memory Type               | Description                                                                                              |
-|---------------------------|----------------------------------------------------------------------------------------------------------|
-| `InMemoryEntityStore`     | An implementation of `BaseEntityStore` that stores the information in the literal computer memory (RAM). |
-| `RedisEntityStore`        | A specific implementation of `BaseEntityStore` that uses Redis as the backend.                           |
-| `SQLiteEntityStore`       | A specific implementation of `BaseEntityStore` that uses SQLite as the backend.                          |
-| `UpstashRedisEntityStore` | A specific implementation of `BaseEntityStore` that uses Upstash as the backend.                         |
-
-These abstractions have received limited development since their initial release. This is because they generally require significant customization for a specific application to be effective, making
-them less widely used than the conversation history management abstractions.
-
-For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application
-that relies on these abstractions:
-1) Review the [long-term memory agent tutorial](long_term_memory_agent), which should provide a good starting point for extracting structured information from the conversation history.
-2) If you're still struggling, open an issue on the LangChain GitHub repository explaining your use case, and we'll try to provide more guidance on how to migrate these abstractions.
-
-The general strategy for extracting structured information from the conversation history is to use a chat model with tool calling capabilities to extract structured information from the conversation history.
-The extracted information can then be saved into an appropriate data structure (e.g., a dictionary), and information from it can be retrieved and added into the prompt as needed.
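-
-A minimal sketch of this strategy, assuming `model` is any chat model that supports tool calling:
-
-```python
-from typing import Optional
-
-from pydantic import BaseModel, Field
-
-class UserFacts(BaseModel):
-    """Facts learned about the user during the conversation."""
-
-    name: Optional[str] = Field(default=None, description="The user's name, if mentioned")
-    interests: list[str] = Field(default_factory=list, description="Topics the user cares about")
-
-# `model` is assumed to be a tool-calling chat model instantiated elsewhere.
-extractor = model.with_structured_output(UserFacts)
-facts = extractor.invoke("Hi, I'm Bob and I love hiking and birdwatching.")
-
-# `facts` is a UserFacts instance that can be stored and later injected into prompts.
-```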
-
-### 3. Implementations that provide composite logic on top of one or more memory implementations
-
-Memory classes that fall into this category include:
-
-| Memory Type            | Description                                                                                                                    |
-|------------------------|--------------------------------------------------------------------------------------------------------------------------------|
-| `CombinedMemory`       | This abstraction accepted a list of `BaseMemory` and fetched relevant memory information from each of them based on the input. |
-| `SimpleMemory`         | Used to add read-only hard-coded context. Users can simply write this information into the prompt.                             |
-| `ReadOnlySharedMemory` | Provided a read-only view of an existing `BaseMemory` implementation.                                                          |
-
-These implementations did not seem to be used widely or provide significant value. Users should be able
-to re-implement these without too much difficulty in custom code.
-
-## Related Resources
-
-Explore persistence with LangGraph:
-
-* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
-* [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
-* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)
-* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)
-
-Add persistence with simple LCEL (favor langgraph for more complex use cases):
-
-* [How to add message history](https://python.langchain.com/docs/how_to/message_history/)
-
-Working with message history:
-
-* [How to trim messages](https://python.langchain.com/docs/how_to/trim_messages)
-* [How to filter messages](https://python.langchain.com/docs/how_to/filter_messages/)
-* [How to merge message runs](https://python.langchain.com/docs/how_to/merge_message_runs/)
diff --git a/langchain_md_files/versions/release_policy.mdx b/langchain_md_files/versions/release_policy.mdx
deleted file mode 100644
index aa6278382af22a9f25729057cd53df710b4e4be7..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/release_policy.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
----
-sidebar_position: 2
-sidebar_label: Release policy
----
-
-# LangChain release policy
-
-The LangChain ecosystem is composed of different component packages (e.g., `langchain-core`, `langchain`, `langchain-community`, `langgraph`, `langserve`, partner packages etc.)
-
-## Versioning
-
-### `langchain`, `langchain-core`, and integration packages
-
-`langchain`, `langchain-core`, `langchain-text-splitters`, and integration packages (`langchain-openai`, `langchain-anthropic`, etc.) follow [semantic versioning](https://semver.org/) in the format of 0.**Y**.**Z**. The packages are under rapid development, so we are currently versioning them with a major version of 0.
-
-Minor version increases will occur for:
-
-- Breaking changes for any public interfaces *not* marked as `beta`.
-
-Patch version increases will occur for:
-
-- Bug fixes,
-- New features,
-- Any changes to private interfaces,
-- Any changes to `beta` features.
-
-When upgrading between minor versions, users should review the list of breaking changes and deprecations.
-
-From time to time, we will version packages as **release candidates**. These are versions that are intended to be released as stable versions, but we want to get feedback from the community before doing so. Release candidates will be versioned as 0.**Y**.**Z**rc**N**. For example, 0.2.0rc1. If no issues are found, the release candidate will be released as a stable version with the same version number. If issues are found, we will release a new release candidate with an incremented `N` value (e.g., 0.2.0rc2).
-
-### `langchain-community`
-
-`langchain-community` is currently on version `0.2.x`.
-
-Minor version increases will occur for:
-
-- Updates to the major/minor versions of required `langchain-x` dependencies. E.g., when updating the required version of `langchain-core` from `^0.2.x` to `0.3.0`.
-
-Patch version increases will occur for:
-
-- Bug fixes,
-- New features,
-- Any changes to private interfaces,
-- Any changes to `beta` features,
-- Breaking changes to integrations to reflect breaking changes in the third-party service.
-
-Whenever possible we will avoid making breaking changes in patch versions.
-However, if an external API makes a breaking change then breaking changes to the corresponding `langchain-community` integration can occur in a patch version.
-
-### `langchain-experimental`
-
-`langchain-experimental` is currently on version `0.0.x`. All changes will be accompanied by patch version increases.
-
-## Release cadence
-
-We expect to space out **minor** releases (e.g., from 0.2.x to 0.3.0) of `langchain` and `langchain-core` by at least 2-3 months, as such releases may contain breaking changes.
-
-Patch versions are released frequently, up to a few times per week, as they contain bug fixes and new features.
-
-## API stability
-
-The development of LLM applications is a rapidly evolving field, and we are constantly learning from our users and the community. As such, we expect that the APIs in `langchain` and `langchain-core` will continue to evolve to better serve the needs of our users.
-
-Even though both `langchain` and `langchain-core` are currently in a pre-1.0 state, we are committed to maintaining API stability in these packages.
-
-- Breaking changes to the public API will result in a minor version bump (the second digit)
-- Any bug fixes or new features will result in a patch version bump (the third digit)
-
-We will generally try to avoid making unnecessary changes, and will provide a deprecation policy for features that are being removed.
-
-### Stability of other packages
-
-The stability of other packages in the LangChain ecosystem may vary:
-
-- `langchain-community` is a community maintained package that contains 3rd party integrations. While we do our best to review and test changes in `langchain-community`, `langchain-community` is expected to experience more breaking changes than `langchain` and `langchain-core` as it contains many community contributions.
-- Partner packages may follow different stability and versioning policies, and users should refer to the documentation of those packages for more information; however, in general these packages are expected to be stable.
-
-### What is "API stability"?
-
-API stability means:
-
-- All the public APIs (everything in this documentation) will not be moved or renamed without providing backwards-compatible aliases.
-- If new features are added to these APIs – which is quite possible – they will not break or change the meaning of existing methods. In other words, "stable" does not (necessarily) mean "complete."
-- If, for some reason, an API declared stable must be removed or replaced, it will be declared deprecated but will remain in the API for at least two minor releases. Warnings will be issued when the deprecated method is called.
-
-### **APIs marked as internal**
-
-Certain APIs are explicitly marked as “internal” in a couple of ways:
-
-- Some documentation refers to internals and mentions them as such. If the documentation says that something is internal, it may change.
-- Functions, methods, and other objects prefixed by a leading underscore (**`_`**). This is the standard Python convention of indicating that something is private; if any method starts with a single **`_`**, it’s an internal API.
-    - **Exception:** Certain methods are prefixed with `_` , but do not contain an implementation. These methods are *meant* to be overridden by sub-classes that provide the implementation. Such methods are generally part of the **Public API** of LangChain.
-
-## Deprecation policy
-
-We will generally avoid deprecating features until a better alternative is available.
-
-When a feature is deprecated, it will continue to work in the current and next minor version of `langchain` and `langchain-core`. After that, the feature will be removed.
-
-Since we're expecting to space out minor releases by at least 2-3 months, this means that a feature can be removed within 2-6 months of being deprecated.
-
-In some situations, we may allow the feature to remain in the code base for longer periods of time, if it's not causing issues in the packages, to reduce the burden on users.
\ No newline at end of file
diff --git a/langchain_md_files/versions/v0_2/deprecations.mdx b/langchain_md_files/versions/v0_2/deprecations.mdx
deleted file mode 100644
index d08d885c2d393ce67b15bd25b104deadff4ea914..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/v0_2/deprecations.mdx
+++ /dev/null
@@ -1,902 +0,0 @@
----
-sidebar_position: 3
-sidebar_label: Changes
-keywords: [retrievalqa, llmchain, conversationalretrievalchain]
----
-
-# Deprecations and Breaking Changes
-
-This page contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
-
-New features and improvements are not listed here. See the [overview](/docs/versions/v0_2/overview/) for a summary of what's new in this release.
-
-## Breaking changes
-
-As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not by default instantiate any specific chat models, LLMs, embedding models, vector stores, etc.; instead, the user is required to specify those explicitly.
-
-The following functions and classes require an explicit LLM to be passed as an argument:
-
-- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
-- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
-- `langchain.chains.openai_functions.get_openapi_chain`
-- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
-- `langchain.indexes.VectorStoreIndexWrapper.query`
-- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
-- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
-- `langchain.chains.flare.FlareChain`
-
-
-The following classes now require passing an explicit Embedding model as an argument:
-
-- `langchain.indexes.VectorstoreIndexCreator`
-
-The following code has been removed:
-
-- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
-
-Behavior was changed for the following code:
-
-
-### @tool decorator
-
-The `@tool` decorator now assigns the function docstring as the tool description. Previously, the `@tool` decorator
-used to prepend the function signature.
-
-Before 0.2.0:
-
-```python
-from langchain_core.tools import tool
-
-@tool
-def my_tool(x: str) -> str:
-    """Some description."""
-    return "something"
-
-print(my_tool.description)
-```
-
-Would result in: `my_tool: (x: str) -> str - Some description.`
-
-As of 0.2.0:
-
-It will result in: `Some description.`
-
-## Code that moved to another package
-
-This section covers code that was moved from `langchain` into another package (e.g., `langchain-community`).
-
-If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
-
- ```shell
- python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
-```
-
- ```shell
- LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
-
- >> from langchain.document_loaders import UnstructuredMarkdownLoader
-
- with new imports of:
-
- >> from langchain_community.document_loaders import UnstructuredMarkdownLoader
-```
-
-We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)
-
-However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we’re releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
-
-## Code targeted for removal
-
-This section covers code that has better alternatives available and will eventually be removed, so that there’s only a single way to do things (e.g., the `predict_messages` method on chat models has been deprecated in favor of `invoke`).
-
-### astream events V1
-
-If you are using `astream_events`, please review how to [migrate to astream events v2](/docs/versions/v0_2/migrating_astream_events).
-
-### langchain_core
-
-#### try_load_from_hub
-
-
-In module: `utils.loading`
-Deprecated: 0.1.30
-Removal: 0.3.0
-
-
-Alternative: Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.
-
-
-#### BaseLanguageModel.predict
-
-
-In module: `language_models.base`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseLanguageModel.predict_messages
-
-
-In module: `language_models.base`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseLanguageModel.apredict
-
-
-In module: `language_models.base`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### BaseLanguageModel.apredict_messages
-
-
-In module: `language_models.base`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### RunTypeEnum
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Use string instead.
-
-
-#### TracerSessionV1Base
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### TracerSessionV1Create
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### TracerSessionV1
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### TracerSessionBase
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### TracerSession
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### BaseRun
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Run
-
-
-#### LLMRun
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Run
-
-
-#### ChainRun
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Run
-
-
-#### ToolRun
-
-
-In module: `tracers.schemas`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Run
-
-
-#### BaseChatModel.__call__
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseChatModel.call_as_llm
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseChatModel.predict
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseChatModel.predict_messages
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseChatModel.apredict
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### BaseChatModel.apredict_messages
-
-
-In module: `language_models.chat_models`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### BaseLLM.__call__
-
-
-In module: `language_models.llms`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseLLM.predict
-
-
-In module: `language_models.llms`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseLLM.predict_messages
-
-
-In module: `language_models.llms`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseLLM.apredict
-
-
-In module: `language_models.llms`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### BaseLLM.apredict_messages
-
-
-In module: `language_models.llms`
-Deprecated: 0.1.7
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### BaseRetriever.get_relevant_documents
-
-
-In module: `retrievers`
-Deprecated: 0.1.46
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### BaseRetriever.aget_relevant_documents
-
-
-In module: `retrievers`
-Deprecated: 0.1.46
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### ChatPromptTemplate.from_role_strings
-
-
-In module: `prompts.chat`
-Deprecated: 0.0.1
-Removal:
-
-
-Alternative: from_messages classmethod
-
-
-#### ChatPromptTemplate.from_strings
-
-
-In module: `prompts.chat`
-Deprecated: 0.0.1
-Removal:
-
-
-Alternative: from_messages classmethod
-
-
-#### BaseTool.__call__
-
-
-In module: `tools`
-Deprecated: 0.1.47
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### convert_pydantic_to_openai_function
-
-
-In module: `utils.function_calling`
-Deprecated: 0.1.16
-Removal: 0.3.0
-
-
-Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
-
-
-#### convert_pydantic_to_openai_tool
-
-
-In module: `utils.function_calling`
-Deprecated: 0.1.16
-Removal: 0.3.0
-
-
-Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
-
-
-#### convert_python_function_to_openai_function
-
-
-In module: `utils.function_calling`
-Deprecated: 0.1.16
-Removal: 0.3.0
-
-
-Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
-
-
-#### format_tool_to_openai_function
-
-
-In module: `utils.function_calling`
-Deprecated: 0.1.16
-Removal: 0.3.0
-
-
-Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
-
-
-#### format_tool_to_openai_tool
-
-
-In module: `utils.function_calling`
-Deprecated: 0.1.16
-Removal: 0.3.0
-
-
-Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
-
-
-### langchain
-
-
-#### AgentType
-
-
-In module: `agents.agent_types`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
-
-
-#### Chain.__call__
-
-
-In module: `chains.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### Chain.acall
-
-
-In module: `chains.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### Chain.run
-
-
-In module: `chains.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: invoke
-
-
-#### Chain.arun
-
-
-In module: `chains.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: ainvoke
-
-
-#### Chain.apply
-
-
-In module: `chains.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: batch
-
-
-#### LLMChain
-
-
-In module: `chains.llm`
-Deprecated: 0.1.17
-Removal: 0.3.0
-
-
-Alternative: [RunnableSequence](/docs/how_to/sequence/), e.g., `prompt | llm`
-
-This [migration guide](/docs/versions/migrating_chains/llm_chain) has a side-by-side comparison.
-
-
-#### LLMSingleActionAgent
-
-
-In module: `agents.agent`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
-
-
-#### Agent
-
-
-In module: `agents.agent`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
-
-
-#### OpenAIFunctionsAgent
-
-
-In module: `agents.openai_functions_agent.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_openai_functions_agent
-
-
-#### ZeroShotAgent
-
-
-In module: `agents.mrkl.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_react_agent
-
-
-#### MRKLChain
-
-
-In module: `agents.mrkl.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### ConversationalAgent
-
-
-In module: `agents.conversational.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_react_agent
-
-
-#### ConversationalChatAgent
-
-
-In module: `agents.conversational_chat.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_json_chat_agent
-
-
-#### ChatAgent
-
-
-In module: `agents.chat.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_react_agent
-
-
-#### OpenAIMultiFunctionsAgent
-
-
-In module: `agents.openai_functions_multi_agent.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_openai_tools_agent
-
-
-#### ReActDocstoreAgent
-
-
-In module: `agents.react.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### DocstoreExplorer
-
-
-In module: `agents.react.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### ReActTextWorldAgent
-
-
-In module: `agents.react.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### ReActChain
-
-
-In module: `agents.react.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### SelfAskWithSearchAgent
-
-
-In module: `agents.self_ask_with_search.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_self_ask_with_search
-
-
-#### SelfAskWithSearchChain
-
-
-In module: `agents.self_ask_with_search.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### StructuredChatAgent
-
-
-In module: `agents.structured_chat.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_structured_chat_agent
-
-
-#### RetrievalQA
-
-
-In module: `chains.retrieval_qa.base`
-Deprecated: 0.1.17
-Removal: 0.3.0
-
-
-Alternative: [create_retrieval_chain](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain)
-This [migration guide](/docs/versions/migrating_chains/retrieval_qa) has a side-by-side comparison.
-
-
-#### load_agent_from_config
-
-
-In module: `agents.loading`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### load_agent
-
-
-In module: `agents.loading`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative:
-
-
-#### initialize_agent
-
-
-In module: `agents.initialize`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
-
-
-#### XMLAgent
-
-
-In module: `agents.xml.base`
-Deprecated: 0.1.0
-Removal: 0.3.0
-
-
-Alternative: create_xml_agent
-
-
-#### CohereRerank
-
-
-In module: `retrievers.document_compressors.cohere_rerank`
-Deprecated: 0.0.30
-Removal: 0.3.0
-
-
-Alternative: langchain_cohere.CohereRerank
-
-
-#### ConversationalRetrievalChain
-
-
-In module: `chains.conversational_retrieval.base`
-Deprecated: 0.1.17
-Removal: 0.3.0
-
-
-Alternative: [create_history_aware_retriever](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) together with [create_retrieval_chain](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain) (see example in docstring)
-This [migration guide](/docs/versions/migrating_chains/conversation_retrieval_chain) has a side-by-side comparison.
-
-
-#### create_extraction_chain_pydantic
-
-
-In module: `chains.openai_tools.extraction`
-Deprecated: 0.1.14
-Removal: 0.3.0
-
-
-Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
-
-
-#### create_openai_fn_runnable
-
-
-In module: `chains.structured_output.base`
-Deprecated: 0.1.14
-Removal: 0.3.0
-
-
-Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
-
-
-#### create_structured_output_runnable
-
-
-In module: `chains.structured_output.base`
-Deprecated: 0.1.17
-Removal: 0.3.0
-
-
-Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
-
-
-#### create_openai_fn_chain
-
-
-In module: `chains.openai_functions.base`
-Deprecated: 0.1.1
-Removal: 0.3.0
-
-
-Alternative: create_openai_fn_runnable
-
-
-#### create_structured_output_chain
-
-
-In module: `chains.openai_functions.base`
-Deprecated: 0.1.1
-Removal: 0.3.0
-
-
-Alternative: ChatOpenAI.with_structured_output
-
-
-#### create_extraction_chain
-
-
-In module: `chains.openai_functions.extraction`
-Deprecated: 0.1.14
-Removal: 0.3.0
-
-
-Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
-
-
-#### create_extraction_chain_pydantic
-
-
-In module: `chains.openai_functions.extraction`
-Deprecated: 0.1.14
-Removal: 0.3.0
-
-
-Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
\ No newline at end of file
diff --git a/langchain_md_files/versions/v0_2/index.mdx b/langchain_md_files/versions/v0_2/index.mdx
deleted file mode 100644
index 07bb76dfdee58763d59bdc7bf81d224bda08138c..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/v0_2/index.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
----
-sidebar_position: 1
----
-
-# Migration
-
-
-
-LangChain v0.2 was released in May 2024. This release includes a number of [breaking changes and deprecations](/docs/versions/v0_2/deprecations). This document contains a guide on upgrading to 0.2.x.
-
-:::note Reference
-
-- [Breaking Changes & Deprecations](/docs/versions/v0_2/deprecations)
-- [Migrating legacy chains to LCEL](/docs/versions/migrating_chains)
-- [Migrating to Astream Events v2](/docs/versions/v0_2/migrating_astream_events)
-
-:::
-
-This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
-
-1. Install the 0.2.x versions of `langchain-core` and `langchain`, and upgrade to recent versions of any other packages you may be using (e.g., langgraph, langchain-community, langchain-openai, etc.).
-2. Verify that your code runs properly with the new packages (e.g., unit tests pass).
-3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
-4. Manually resolve any remaining deprecation warnings.
-5. Re-run unit tests.
-6. If you are using `astream_events`, please review how to [migrate to astream events v2](/docs/versions/v0_2/migrating_astream_events).
-
-## Upgrade to new imports
-
-We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
-we hope that it will help you migrate your code more quickly.
-
-The migration script has the following limitations:
-
-1. It’s limited to helping users move from old imports to new imports. It does not help address other deprecations.
-2. It can’t handle imports that involve `as` (see the hypothetical example after this list).
-3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., a function body).
-4. It will likely miss some deprecated imports.
-
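-For instance, an aliased import like the following (hypothetical snippet) would be skipped by the script and needs to be updated by hand:
-
-```python
-# The migration script does not rewrite imports that use `as`:
-from langchain.chat_models import ChatOpenAI as ChatModel
-```
-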
-Here is an example of the import changes that the migration script can help apply automatically:
-
-
-| From Package        | To Package               | Deprecated Import                                                  | New Import                                                          |
-|---------------------|--------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------|
-| langchain           | langchain-community      | from langchain.vectorstores import InMemoryVectorStore             | from langchain_community.vectorstores import InMemoryVectorStore    |
-| langchain-community | langchain_openai         | from langchain_community.chat_models import ChatOpenAI             | from langchain_openai import ChatOpenAI                             |
-| langchain-community | langchain-core           | from langchain_community.document_loaders import Blob              | from langchain_core.document_loaders import Blob                    |
-| langchain           | langchain-core           | from langchain.schema.document import Document                     | from langchain_core.documents import Document                       |
-| langchain           | langchain-text-splitters | from langchain.text_splitter import RecursiveCharacterTextSplitter | from langchain_text_splitters import RecursiveCharacterTextSplitter |
-
-
-## Installation
-
-```bash
-pip install langchain-cli
-langchain-cli --version # <-- Make sure the version is at least 0.0.22
-```
-
-## Usage
-
-Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
-
-You will need to run the migration script **twice** as it only applies one import replacement per run.
-
-For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:
-
-After the first run, you’ll get: `from langchain_community.chat_models import ChatOpenAI`
-After the second run, you’ll get: `from langchain_openai import ChatOpenAI`
-
-```bash
-# Run a first time
-# Will replace from langchain.chat_models import ChatOpenAI
-langchain-cli migrate --diff [path to code] # Preview
-langchain-cli migrate [path to code] # Apply
-
-# Run a second time to apply more import replacements
-langchain-cli migrate --diff [path to code] # Preview
-langchain-cli migrate [path to code] # Apply
-```
-
-### Other options
-
-```bash
-# See help menu
-langchain-cli migrate --help
-# Preview Changes without applying
-langchain-cli migrate --diff [path to code]
-# Run on code including ipython notebooks
-# Apply all import updates except for updates from langchain to langchain-core
-langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
-```
diff --git a/langchain_md_files/versions/v0_2/migrating_astream_events.mdx b/langchain_md_files/versions/v0_2/migrating_astream_events.mdx
deleted file mode 100644
index 24d8855a6949e33ba35cf6338771a28233c3c2b5..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/v0_2/migrating_astream_events.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-sidebar_position: 2
-sidebar_label: astream_events v2
----
-
-# Migrating to astream_events(..., version="v2")
-
-We've added a `v2` of the astream_events API with the release of `0.2.x`. You can see this [PR](https://github.com/langchain-ai/langchain/pull/21638) for more details.
-
-The `v2` version is a re-write of the `v1` version, and should be more efficient, with more consistent output for the events. The `v1` version of the API will be deprecated in favor of the `v2` version and will be removed in `0.4.0`.
-
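-As a quick reference, a minimal sketch of consuming `v2` events (assuming an existing runnable called `chain`):
-
-```python
-async for event in chain.astream_events({"question": "hello"}, version="v2"):
-    # Each event is a dict with "event", "name", "data", and other metadata.
-    if event["event"] == "on_chat_model_stream":
-        print(event["data"]["chunk"].content, end="")
-```
-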
-Below is a list of changes between the `v1` and `v2` versions of the API.
-
-
-### output for `on_chat_model_end`
-
-In `v1`, the outputs associated with `on_chat_model_end` changed depending on whether the
-chat model was run as a root level runnable or as part of a chain.
-
-As a root level runnable the output was:
-
-```python
-"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
-```
-
-As part of a chain the output was:
-
-```python
-"data": {
-    "output": {
-        "generations": [
-            [
-                {
-                    "generation_info": None,
-                    "message": AIMessageChunk(
-                        content="hello world!", id=AnyStr()
-                    ),
-                    "text": "hello world!",
-                    "type": "ChatGenerationChunk",
-                }
-            ]
-        ],
-        "llm_output": None,
-    }
-},
-```
-
-
-As of `v2`, the output will always be the simpler representation:
-
-```python
-"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
-```
-
-:::note
-Non-chat models (i.e., regular LLMs) will remain associated with the more verbose format for now.
-:::
-
-### output for `on_retriever_end`
-
-`on_retriever_end` output will always return a list of `Documents`.
-
-Before:
-```python
-{
-    "data": {
-        "output": [
-            Document(...),
-            Document(...),
-            ...
-        ]
-    }
-}
-```
-
-### Removed `on_retriever_stream`
-
-The `on_retriever_stream` event was an artifact of the implementation and has been removed.
-
-Full information associated with the event is already available in the `on_retriever_end` event.
-
-Please use `on_retriever_end` instead.
-
-### Removed `on_tool_stream`
-
-The `on_tool_stream` event was an artifact of the implementation and has been removed.
-
-Full information associated with the event is already available in the `on_tool_end` event.
-
-Please use `on_tool_end` instead.
-
-### Propagating Names
-
-Names of runnables have been updated to be more consistent.
-
-```python
-model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields(
-    messages=ConfigurableField(
-        id="messages",
-        name="Messages",
-        description="Messages return by the LLM",
-    )
-)
-```
-
-In `v1`, the event name was `RunnableConfigurableFields`.
-
-In `v2`, the event name is `GenericFakeChatModel`.
-
-If you're filtering by event names, check if you need to update your filters.
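-
-For example, a name filter that targeted the old `v1` name would need to move to the new one (a sketch, assuming an existing `chain` that wraps the model above):
-
-```python
-async for event in chain.astream_events(
-    {"question": "hi"},
-    version="v2",
-    include_names=["GenericFakeChatModel"],  # was "RunnableConfigurableFields" in v1
-):
-    print(event["event"], event["name"])
-```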
-
-### RunnableRetry
-
-Usage of [RunnableRetry](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.retry.RunnableRetry.html)
-within an LCEL chain being streamed generated an incorrect `on_chain_end` event in `v1` corresponding
-to the failed runnable invocation that was being retried. This event has been removed in `v2`.
-
-No action is required for this change.
diff --git a/langchain_md_files/versions/v0_2/overview.mdx b/langchain_md_files/versions/v0_2/overview.mdx
deleted file mode 100644
index 7fdb3fa78a74025ee7f2fb4688cda57d1b192e6d..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/v0_2/overview.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
----
-sidebar_position: 0
----
-
-# Overview
-
-## What’s new in LangChain?
-
-The following features have been added during the development of 0.1.x:
-
-- Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events).
-- [Standardized tool calling support](https://blog.langchain.dev/tool-calling-with-langchain/)
-- A standardized interface for [structuring output](https://github.com/langchain-ai/langchain/discussions/18154)
-- [@chain decorator](https://python.langchain.com/docs/expression_language/how_to/decorator/) to more easily create **RunnableLambdas**
-- [Inspect your runnables](https://python.langchain.com/docs/expression_language/how_to/inspect/)
-- In Python, better async support for many core abstractions (thank you [@cbornet](https://github.com/cbornet)!!)
-- Include response metadata in `AIMessage` to make it easy to access raw output from the underlying models
-- Tooling to visualize [your runnables](https://python.langchain.com/docs/expression_language/how_to/inspect/) or [your langgraph app](https://github.com/langchain-ai/langgraph/blob/main/examples/visualization.ipynb)
-- Interoperability of chat message histories across most providers
-- [Over 20 partner packages in Python](https://python.langchain.com/docs/integrations/providers/) for popular integrations
-
-
-## What’s coming to LangChain?
-
-- We’ve been working hard on [langgraph](https://langchain-ai.github.io/langgraph/). We will be building more capabilities on top of it and focusing on making it the go-to framework for agent architectures.
-- Vectorstores V2! We’ll be revisiting our vectorstores abstractions to help improve usability and reliability.
-- Better documentation and versioned docs!
-- We’re planning a breaking release (0.3.0) sometime between July-September to [upgrade to full support of Pydantic 2](https://github.com/langchain-ai/langchain/discussions/19339), and will drop support for Pydantic 1 (including objects originating from the `v1` namespace of Pydantic 2).
-
-## What changed?
-
-Due to the rapidly evolving field, LangChain has also evolved rapidly.
-
-This document serves to outline at a high level what has changed and why.
-
-### TLDR
-
-**As of 0.2.0:**
-
-- This release completes the work that we started with release 0.1.0 by removing the dependency of `langchain` on `langchain-community`.
-- The `langchain` package no longer requires `langchain-community`. Instead, `langchain-community` now depends on `langchain-core` and `langchain`.
-- User code that still relies on deprecated imports from `langchain` will continue to work as long as `langchain-community` is installed. These imports will start raising errors in release 0.4.x.
-
-**As of 0.1.0:**
-
-- `langchain` was split into the following component packages: `langchain-core`, `langchain`, `langchain-community`, `langchain-[partner]` to improve the usability of langchain code in production settings. You can read more about it on our [blog](https://blog.langchain.dev/langchain-v0-1-0/).
-
-### Ecosystem organization
-
-By the release of 0.1.0, LangChain had grown to a large ecosystem with many integrations and a large community.
-
-To improve the usability of LangChain in production, we split the single `langchain` package into multiple packages. This allowed us to create a good foundation architecture for the LangChain ecosystem and improve the usability of `langchain` in production.
-
-Here is the high-level breakdown of the ecosystem:
-
-- **langchain-core**: contains core abstractions involving LangChain Runnables, tooling for observability, and base implementations of important abstractions (e.g., Chat Models).
-- **langchain:** contains generic code that is built using interfaces defined in `langchain-core`. This package is for code that generalizes well across different implementations of specific interfaces. For example, `create_tool_calling_agent` works across chat models that support [tool calling capabilities](https://blog.langchain.dev/tool-calling-with-langchain/).
-- **langchain-community**: community maintained 3rd party integrations. Contains integrations based on interfaces defined in **langchain-core**. Maintained by the LangChain community.
-- **Partner Packages (e.g., langchain-[partner])**: Partner packages are packages dedicated to especially popular integrations (e.g., `langchain-openai`, `langchain-anthropic` etc.). The dedicated packages generally benefit from better reliability and support.
-- `langgraph`: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
-- `langserve`: Deploy LangChain chains as REST APIs.
-
-
-In the 0.1.0 release, `langchain-community` was retained as a required dependency of `langchain`.
-
-This allowed imports of vectorstores, chat models, and other integrations to continue working through `langchain`
-rather than forcing users to update all of their imports to `langchain-community`.
-
-For the 0.2.0 release, we’re removing the dependency of `langchain` on `langchain-community`. This is something we’ve been planning to do since the 0.1 release because we believe this is the right package architecture.
-
-Old imports will continue to work as long as `langchain-community` is installed. These imports will be removed in the 0.4.0 release.
-
-To understand why we think breaking the dependency of `langchain` on `langchain-community` is best we should understand what each package is meant to do.
-
-`langchain` is meant to contain high-level chains and agent architectures. The logic in these should be specified at the level of abstractions like `ChatModel` and `Retriever`, and should not be specific to any one integration. This has two main benefits:
-
-1. `langchain` is fairly lightweight. Here is the full list of required dependencies (after the split)
-
-    ```toml
-    python = ">=3.8.1,<4.0"
-    langchain-core = "^0.2.0"
-    langchain-text-splitters = ">=0.0.1,<0.1"
-    langsmith = "^0.1.17"
-    pydantic = ">=1,<3"
-    SQLAlchemy = ">=1.4,<3"
-    requests = "^2"
-    PyYAML = ">=5.3"
-    numpy = "^1"
-    aiohttp = "^3.8.3"
-    tenacity = "^8.1.0"
-    jsonpatch = "^1.33"
-    ```
-
-2. `langchain` chains/agents are largely integration-agnostic, which makes it easy to experiment with different integrations and future-proofs your code should there be issues with one specific integration.
-
-There is also a third less tangible benefit which is that being integration-agnostic forces us to find only those very generic abstractions and architectures which generalize well across integrations. Given how general the abilities of the foundational tech are, and how quickly the space is moving, having generic architectures is a good way of future-proofing your applications.
-
-`langchain-community` is intended to have all integration-specific components that are not yet being maintained in separate `langchain-{partner}` packages. Today this is still the majority of integrations and a lot of code. This code is primarily contributed by the community, while `langchain` is largely written by core maintainers. All of these integrations use optional dependencies and conditional imports, which prevents dependency bloat and conflicts but means compatible dependency versions are not made explicit. Given the volume of integrations in `langchain-community` and the speed at which integrations change, it’s very hard to follow semver versioning, and we currently don’t.
-
-All of which is to say that there’s no large benefits to `langchain` depending on `langchain-community` and some obvious downsides: the functionality in `langchain` should be integration agnostic anyways, `langchain-community` can’t be properly versioned, and depending on `langchain-community` increases the [vulnerability surface](https://github.com/langchain-ai/langchain/discussions/19083) of `langchain`.
-
-For more context about the reasons for this organization, please see our blog: https://blog.langchain.dev/langchain-v0-1-0/
\ No newline at end of file
diff --git a/langchain_md_files/versions/v0_3/index.mdx b/langchain_md_files/versions/v0_3/index.mdx
deleted file mode 100644
index f553ab19866160a963c1a25696bb4d15a79c1e3a..0000000000000000000000000000000000000000
--- a/langchain_md_files/versions/v0_3/index.mdx
+++ /dev/null
@@ -1,272 +0,0 @@
-# LangChain v0.3
-
-*Last updated: 09.16.24*
-
-## What's changed
-
-* All packages have been upgraded from Pydantic 1 to Pydantic 2 internally. Use of Pydantic 2 in user code is fully supported with all packages without the need for bridges like `langchain_core.pydantic_v1` or `pydantic.v1`.
-* Pydantic 1 will no longer be supported as it reached its end-of-life in June 2024.
-* Python 3.8 will no longer be supported as its end-of-life is October 2024.
-
-**These are the only breaking changes.**
-
-## What’s new
-
-The following features have been added during the development of 0.2.x:
-
-- Moved more integrations from `langchain-community` to their own `langchain-x` packages. This is a non-breaking change, as the legacy implementations are left in `langchain-community` and marked as deprecated. This allows us to better manage the dependencies of, test, and version these integrations. You can see all the latest integration packages in the [API reference](https://python.langchain.com/v0.2/api_reference/reference.html#integrations).
-- Simplified tool definition and usage. Read more [here](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/).
-- Added utilities for interacting with chat models: [universal model constructor](https://python.langchain.com/v0.2/docs/how_to/chat_models_universal_init/), [rate limiter](https://python.langchain.com/v0.2/docs/how_to/chat_model_rate_limiting/), and [message utilities](https://python.langchain.com/v0.2/docs/how_to/#messages).
-- Added the ability to [dispatch custom events](https://python.langchain.com/v0.2/docs/how_to/callbacks_custom_events/).
-- Revamped integration docs and API reference. Read more [here](https://blog.langchain.dev/langchain-integration-docs-revamped/).
-- Marked as deprecated a number of legacy chains and added migration guides for all of them. These are slated for removal in `langchain` 1.0.0. See the deprecated chains and associated [migration guides here](https://python.langchain.com/v0.2/docs/versions/migrating_chains/).
-
-## How to update your code
-
-If you're using `langchain` / `langchain-community` / `langchain-core` 0.0 or 0.1, we recommend that you first [upgrade to 0.2](https://python.langchain.com/v0.2/docs/versions/v0_2/).
-
-If you're using `langgraph`, upgrade to `langgraph>=0.2.20,<0.3`. This will work with either 0.2 or 0.3 versions of all the base packages.
-
-Here is a complete list of all packages that have been released and what we recommend upgrading your version constraints to.
-Any package that now requires `langchain-core` 0.3 had a minor version bump.
-Any package that is now compatible with both `langchain-core` 0.2 and 0.3 had a patch version bump.
-
-You can use the `langchain-cli` to update deprecated imports automatically.
-The CLI will handle updating deprecated imports that were introduced in LangChain 0.0.x and LangChain 0.1, as
-well as updating the `langchain_core.pydantic_v1` and `langchain.pydantic_v1` imports.
-
-
-### Base packages
-
-| Package                  | Latest | Recommended constraint |
-|--------------------------|--------|------------------------|
-| langchain                | 0.3.0  | >=0.3,&lt;0.4             |
-| langchain-community      | 0.3.0  | >=0.3,&lt;0.4             |
-| langchain-text-splitters | 0.3.0  | >=0.3,&lt;0.4             |
-| langchain-core           | 0.3.0  | >=0.3,&lt;0.4             |
-| langchain-experimental   | 0.3.0  | >=0.3,&lt;0.4             |
-
-### Downstream packages
-
-| Package   | Latest | Recommended constraint |
-|-----------|--------|------------------------|
-| langgraph | 0.2.20 | >=0.2.20,&lt;0.3          |
-| langserve | 0.3.0  | >=0.3,&lt;0.4             |
-
-### Integration packages
-
-| Package                                | Latest  | Recommended constraint     |
-| -------------------------------------- | ------- | -------------------------- |
-| langchain-ai21                         | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-aws                          | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-anthropic                    | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-astradb                      | 0.4.1   | >=0.4.1,&lt;0.5               |
-| langchain-azure-dynamic-sessions       | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-box                          | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-chroma                       | 0.1.4   | >=0.1.4,&lt;0.2               |
-| langchain-cohere                       | 0.3.0   | >=0.3,&lt;0.4                 |
-| langchain-elasticsearch                | 0.3.0   | >=0.3,&lt;0.4                 |
-| langchain-exa                          | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-fireworks                    | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-groq                         | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-google-community             | 2.0.0   | >=2,&lt;3                     |
-| langchain-google-genai                 | 2.0.0   | >=2,&lt;3                     |
-| langchain-google-vertexai              | 2.0.0   | >=2,&lt;3                     |
-| langchain-huggingface                  | 0.1.0   | >=0.1,&lt;0.2                 |
-| langchain-ibm                          | 0.3.0   | >=0.3,&lt;0.4                 |
-| langchain-milvus                       | 0.1.6   | >=0.1.6,&lt;0.2               |
-| langchain-mistralai                    | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-mongodb                      | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-nomic                        | 0.1.3   | >=0.1.3,&lt;0.2               |
-| langchain-nvidia                       | 0.3.0   | >=0.3,&lt;0.4                 |
-| langchain-ollama                       | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-openai                       | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-pinecone                     | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-postgres                     | 0.0.13  | >=0.0.13,&lt;0.1              |
-| langchain-prompty                      | 0.1.0   | >=0.1,&lt;0.2                 |
-| langchain-qdrant                       | 0.1.4   | >=0.1.4,&lt;0.2               |
-| langchain-redis                        | 0.1.0   | >=0.1,&lt;0.2                 |
-| langchain-sema4                        | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-together                     | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-unstructured                 | 0.1.4   | >=0.1.4,&lt;0.2               |
-| langchain-upstage                      | 0.3.0   | >=0.3,&lt;0.4                 |
-| langchain-voyageai                     | 0.2.0   | >=0.2,&lt;0.3                 |
-| langchain-weaviate                     | 0.0.3   | >=0.0.3,&lt;0.1               |
-
-Once you've updated to recent versions of the packages, you may need to address the following issues stemming from the internal switch from Pydantic v1 to Pydantic v2:
-
-- If your code depends on Pydantic aside from LangChain, you will need to upgrade your pydantic version constraints to be `pydantic>=2,<3`.  See [Pydantic’s migration guide](https://docs.pydantic.dev/latest/migration/) for help migrating your non-LangChain code to Pydantic v2 if you use pydantic v1.
-- There are a number of side effects to LangChain components caused by the internal switch from Pydantic v1 to v2. We have listed some of the common cases below together with the recommended solutions.
-
-## Common issues when transitioning to Pydantic 2
-
-### 1. Do not use the `langchain_core.pydantic_v1` namespace
-
-Replace any usage of `langchain_core.pydantic_v1` or `langchain.pydantic_v1` with
-direct imports from `pydantic`.
-
-For example,
-
-```python
-from langchain_core.pydantic_v1 import BaseModel
-```
-
-to:
-
-```python
-from pydantic import BaseModel
-```
-
-This may require you to make additional updates to your Pydantic code given that there are a number of breaking changes in Pydantic 2. See the [Pydantic Migration](https://docs.pydantic.dev/latest/migration/) for how to upgrade your code from Pydantic 1 to 2.
-
-### 2. Passing Pydantic objects to LangChain APIs
-
-Users using the following APIs:
-
-* `BaseChatModel.bind_tools`
-* `BaseChatModel.with_structured_output`
-* `Tool.from_function`
-* `StructuredTool.from_function`
-
-should ensure that they are passing Pydantic 2 objects to these APIs rather than
-Pydantic 1 objects (created via the `pydantic.v1` namespace of pydantic 2).
-
-:::caution
-While `v1` objects may be accepted by some of these APIs, users are advised to
-use Pydantic 2 objects to avoid future issues.
-:::
-
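-For example, a minimal sketch of passing a Pydantic 2 model to `with_structured_output` (the specific model name here is just an assumption):
-
-```python
-from pydantic import BaseModel, Field  # Pydantic 2, not pydantic.v1
-from langchain_openai import ChatOpenAI
-
-class Person(BaseModel):
-    """Information about a person."""
-    name: str = Field(description="The person's name")
-    age: int = Field(description="The person's age")
-
-llm = ChatOpenAI(model="gpt-4o-mini")  # any tool-calling chat model works
-structured_llm = llm.with_structured_output(Person)
-person = structured_llm.invoke("Alice is 30 years old.")
-```
-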
-### 3. Sub-classing LangChain models
-
-Any sub-classing from existing LangChain models (e.g., `BaseTool`, `BaseChatModel`, `LLM`)
-should upgrade to use Pydantic 2 features.
-
-For example, any user code that's relying on Pydantic 1 features (e.g., `validator`) should
-be updated to the Pydantic 2 equivalent (e.g., `field_validator`), and any references to
-`pydantic.v1`, `langchain_core.pydantic_v1`, `langchain.pydantic_v1` should be replaced
-with imports from `pydantic`.
-
-```python
-from pydantic.v1 import validator, Field # if pydantic 2 is installed
-# from pydantic import validator, Field # if pydantic 1 is installed
-# from langchain_core.pydantic_v1 import validator, Field
-# from langchain.pydantic_v1 import validator, Field
-from langchain_core.tools import BaseTool
-
-class CustomTool(BaseTool): # BaseTool is v1 code
-    x: int = Field(default=1)
-
-    def _run(*args, **kwargs):
-        return "hello"
-
-    @validator('x') # v1 code
-    @classmethod
-    def validate_x(cls, x: int) -> int:
-        return 1
-```
-
-Should change to:
-
-```python
-from pydantic import Field, field_validator # pydantic v2
-from langchain_core.tools import BaseTool
-
-class CustomTool(BaseTool): # BaseTool is v2 code
-    x: int = Field(default=1)
-
-    def _run(*args, **kwargs):
-        return "hello"
-
-    @field_validator('x') # v2 code
-    @classmethod
-    def validate_x(cls, x: int) -> int:
-        return 1
-
-
-CustomTool(
-    name='custom_tool',
-    description="hello",
-    x=1,
-)
-```
-
-### 4. model_rebuild()
-
-When sub-classing from LangChain models, users may need to add relevant imports
-to the file and rebuild the model.
-
-You can read more about `model_rebuild` [here](https://docs.pydantic.dev/latest/concepts/models/#rebuilding-model-schema).
-
-```python
-from langchain_core.output_parsers import BaseOutputParser
-
-
-class FooParser(BaseOutputParser):
-    ...
-```
-
-New code:
-
-```python
-from typing import Optional as Optional
-
-from langchain_core.output_parsers import BaseOutputParser
-
-class FooParser(BaseOutputParser):
-    ...
-
-FooParser.model_rebuild()
-```
-
-## Migrate using langchain-cli
-
-The `langchain-cli` can help update deprecated LangChain imports in your code automatically.
-
-Please note that the `langchain-cli` only handles deprecated LangChain imports and cannot
-help to upgrade your code from pydantic 1 to pydantic 2.
-
-For help with the Pydantic 1 to 2 migration itself please refer to the [Pydantic Migration Guidelines](https://docs.pydantic.dev/latest/migration/).
-
-As of 0.0.31, the `langchain-cli` relies on [gritql](https://about.grit.io/) for applying code mods.
-
-### Installation
-
-```bash
-pip install -U langchain-cli
-langchain-cli --version # <-- Make sure the version is at least 0.0.31
-```
-
-### Usage
-
-Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
-
-The `langchain-cli` will handle the `langchain_core.pydantic_v1` deprecation introduced in LangChain 0.3, as well
-as older deprecations (e.g., `from langchain.chat_models import ChatOpenAI`, which should be `from langchain_openai import ChatOpenAI`).
-
-You will need to run the migration script **twice** as it only applies one import replacement per run.
-
-For example, say that your code is still using the old import `from langchain.chat_models import ChatOpenAI`:
-
-After the first run, you’ll get: `from langchain_community.chat_models import ChatOpenAI`
-After the second run, you’ll get: `from langchain_openai import ChatOpenAI`
-
-```bash
-# Run a first time
-# Will replace from langchain.chat_models import ChatOpenAI
-langchain-cli migrate --diff [path to code] # Preview
-langchain-cli migrate [path to code] # Apply
-
-# Run a second time to apply more import replacements
-langchain-cli migrate --diff [path to code] # Preview
-langchain-cli migrate [path to code] # Apply
-```
-
-### Other options
-
-```bash
-# See help menu
-langchain-cli migrate --help
-# Preview Changes without applying
-langchain-cli migrate --diff [path to code]
-# Approve changes interactively
-langchain-cli migrate --interactive [path to code]
-```
diff --git a/openai-cookbook_md_files/How_to_build_an_agent_with_the_node_sdk.mdx b/openai-cookbook_md_files/How_to_build_an_agent_with_the_node_sdk.mdx
deleted file mode 100644
index c28975d173ab12f99583786e44ae80002f6df7d6..0000000000000000000000000000000000000000
--- a/openai-cookbook_md_files/How_to_build_an_agent_with_the_node_sdk.mdx
+++ /dev/null
@@ -1,492 +0,0 @@
-# How to build an agent with the Node.js SDK
-
-OpenAI functions enable your app to take action based on user inputs. This means that it can, e.g., search the web, send emails, or book tickets on behalf of your users, making it more powerful than a regular chatbot.
-
-In this tutorial, you will build an app that uses OpenAI functions along with the latest version of the Node.js SDK. The app runs in the browser, so you only need a code editor and, e.g., VS Code Live Server to follow along locally. Alternatively, write your code directly in the browser via [this code playground at Scrimba.](https://scrimba.com/scrim/c6r3LkU9)
-
-## What you will build
-
-Our app is a simple agent that helps you find activities in your area.
-It has access to two functions, `getLocation()` and `getCurrentWeather()`,
-which means it can figure out where you’re located and what the weather
-is at the moment.
-
-At this point, it's important to understand that
-OpenAI doesn't execute any code for you. It just tells your app which
-functions it should use in a given scenario, and then leaves it up to
-your app to invoke them.
-
-Once our agent knows your location and the weather, it'll use GPT’s
-internal knowledge to suggest suitable local activities for you.
-
-## Importing the SDK and authenticating with OpenAI
-
-We start by importing the OpenAI SDK at the top of our JavaScript file and authenticate with our API key, which we have stored as an environment variable.
-
-```js
-import OpenAI from "openai";
-
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY,
-  dangerouslyAllowBrowser: true,
-});
-```
-
-Since we're running our code in a browser environment at Scrimba, we also need to set `dangerouslyAllowBrowser: true` to confirm we understand the risks involved with client-side API requests. Please note that you should move these requests over to a Node server in a production app.
-
-## Creating our two functions
-
-Next, we'll create the two functions. The first one - `getLocation` -
-uses the [IP API](https://ipapi.co/) to get the location of the
-user.
-
-```js
-async function getLocation() {
-  const response = await fetch("https://ipapi.co/json/");
-  const locationData = await response.json();
-  return locationData;
-}
-```
-
-The IP API returns a bunch of data about your location, including your
-latitude and longitude, which we’ll use as arguments in the second
-function `getCurrentWeather`. It uses the [Open Meteo
-API](https://open-meteo.com/) to get the current weather data, like
-this:
-
-```js
-async function getCurrentWeather(latitude, longitude) {
-  const url = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&hourly=apparent_temperature`;
-  const response = await fetch(url);
-  const weatherData = await response.json();
-  return weatherData;
-}
-```
-
-## Describing our functions for OpenAI
-
-For OpenAI to understand the purpose of these functions, we need to
-describe them using a specific schema. We'll create an array called
-`tools` that contains one object per function. Each object
-will have two keys: `type` and `function`, and the `function` key has
-three subkeys: `name`, `description`, and `parameters`.
-
-```js
-const tools = [
-  {
-    type: "function",
-    function: {
-      name: "getCurrentWeather",
-      description: "Get the current weather in a given location",
-      parameters: {
-        type: "object",
-        properties: {
-          latitude: {
-            type: "string",
-          },
-          longitude: {
-            type: "string",
-          },
-        },
-        required: ["longitude", "latitude"],
-      },
-    }
-  },
-  {
-    type: "function",
-    function: {
-      name: "getLocation",
-      description: "Get the user's location based on their IP address",
-      parameters: {
-        type: "object",
-        properties: {},
-      },
-    }
-  },
-];
-```
-
-## Setting up the messages array
-
-We also need to define a `messages` array. This will keep track of all of the messages back and forth between our app and OpenAI.
-
-The first object in the array should always have the `role` property set to `"system"`, which tells OpenAI that this is how we want it to behave.
-
-```js
-const messages = [
-  {
-    role: "system",
-    content:
-      "You are a helpful assistant. Only use the functions you have been provided with.",
-  },
-];
-```
-
-## Creating the agent function
-
-We are now ready to build the logic of our app, which lives in the
-`agent` function. It is asynchronous and takes one argument: the
-`userInput`.
-
-We start by pushing the `userInput` to the messages array. This time, we set the `role` to `"user"`, so that OpenAI knows that this is the input from the user.
-
-```js
-async function agent(userInput) {
-  messages.push({
-    role: "user",
-    content: userInput,
-  });
-  const response = await openai.chat.completions.create({
-    model: "gpt-4",
-    messages: messages,
-    tools: tools,
-  });
-  console.log(response);
-}
-```
-
-Next, we'll send a request to the Chat completions endpoint via the
-`chat.completions.create()` method in the Node SDK. This method takes a
-configuration object as an argument. In it, we'll specify three
-properties:
-
-- `model` - Decides which AI model we want to use (in our case,
-  GPT-4).
-- `messages` - The entire history of messages between the user and the
-  AI up until this point.
-- `tools` - A list of tools the model may call. Currently, only
-  functions are supported as tools. Here, we'll use the `tools` array we
-  created earlier.
-
-## Running our app with a simple input
-
-Let's try to run the `agent` with an input that requires a function call to give a suitable reply.
-
-```js
-agent("Where am I located right now?");
-```
-
-When we run the code above, we see the response from OpenAI logged out
-to the console like this:
-
-```js
-{
-    id: "chatcmpl-84ojoEJtyGnR6jRHK2Dl4zTtwsa7O",
-    object: "chat.completion",
-    created: 1696159040,
-    model: "gpt-4-0613",
-    choices: [{
-        index: 0,
-        message: {
-            role: "assistant",
-            content: null,
-            tool_calls: [{
-              id: "call_CBwbo9qoXUn1kTR5pPuv6vR1",
-              type: "function",
-              function: {
-                name: "getLocation",
-                arguments: "{}"
-              }
-            }]
-        },
-        logprobs: null,
-        finish_reason: "tool_calls" // OpenAI wants us to call a function
-    }],
-    usage: {
-        prompt_tokens: 134,
-        completion_tokens: 6,
-        total_tokens: 140
-    },
-    system_fingerprint: null
-}
-```
-
-This response tells us that we should call one of our functions, as it contains the following key: `finish_reason: "tool_calls"`.
-
-The name of the function can be found in the
-`response.choices[0].message.tool_calls[0].function.name` key, which is set to
-`"getLocation"`.
-
-## Turning the OpenAI response into a function call
-
-Now that we have the name of the function as a string, we'll need to
-translate that into a function call. To help us with that, we'll gather
-both of our functions in an object called `availableTools`:
-
-```js
-const availableTools = {
-  getCurrentWeather,
-  getLocation,
-};
-```
-
-This is handy because we'll be able to access the `getLocation` function
-via bracket notation and the string we got back from OpenAI, like this:
-`availableTools["getLocation"]`.
-
-```js
-const { finish_reason, message } = response.choices[0];
-
-if (finish_reason === "tool_calls" && message.tool_calls) {
-  const functionName = message.tool_calls[0].function.name;
-  const functionToCall = availableTools[functionName];
-  const functionArgs = JSON.parse(message.tool_calls[0].function.arguments);
-  const functionArgsArr = Object.values(functionArgs);
-  const functionResponse = await functionToCall.apply(null, functionArgsArr);
-  console.log(functionResponse);
-}
-```
-
-We're also grabbing ahold of any arguments OpenAI wants us to pass into
-the function: `message.tool_calls[0].function.arguments`.
-However, we won't need any arguments for this first function call.
-
-If we run the code again with the same input
-(`"Where am I located right now?"`), we'll see that `functionResponse`
-is an object filled with location data about where the user is located
-right now. In my case, that is Oslo, Norway.
-
-```js
-{ip: "193.212.60.170", network: "193.212.60.0/23", version: "IPv4", city: "Oslo", region: "Oslo County", region_code: "03", country: "NO", country_name: "Norway", country_code: "NO", country_code_iso3: "NOR", country_capital: "Oslo", country_tld: ".no", continent_code: "EU", in_eu: false, postal: "0026", latitude: 59.955, longitude: 10.859, timezone: "Europe/Oslo", utc_offset: "+0200", country_calling_code: "+47", currency: "NOK", currency_name: "Krone", languages: "no,nb,nn,se,fi", country_area: 324220, country_population: 5314336, asn: "AS2119", org: "Telenor Norge AS"}
-```
-
-We'll add this data to a new item in the `messages` array, where we also
-specify the name of the function we called.
-
-```js
-messages.push({
-  role: "function",
-  name: functionName,
-  content: `The result of the last function was this: ${JSON.stringify(
-    functionResponse
-  )}
-  `,
-});
-```
-
-Notice that the `role` is set to `"function"`. This tells OpenAI
-that the `content` parameter contains the result of the function call
-and not the input from the user.
-
-At this point, we need to send a new request to OpenAI with this updated
-`messages` array. However, we don’t want to hard code a new function
-call, as our agent might need to go back and forth between itself and
-GPT several times until it has found the final answer for the user.
-
-This can be solved in several different ways, e.g. recursion, a
-while-loop, or a for-loop. We'll use a good old for-loop for the sake of
-simplicity.
-
-## Creating the loop
-
-At the top of the `agent` function, we'll create a loop that lets us run
-the entire procedure up to five times.
-
-If we get back `finish_reason: "tool_calls"` from GPT, we'll just
-push the result of the function call to the `messages` array and jump to
-the next iteration of the loop, triggering a new request.
-
-If we get `finish_reason: "stop"` back, then GPT has found a suitable
-answer, so we'll return the function and cancel the loop.
-
-```js
-for (let i = 0; i < 5; i++) {
-  const response = await openai.chat.completions.create({
-    model: "gpt-4",
-    messages: messages,
-    tools: tools,
-  });
-  const { finish_reason, message } = response.choices[0];
-
-  if (finish_reason === "tool_calls" && message.tool_calls) {
-    const functionName = message.tool_calls[0].function.name;
-    const functionToCall = availableTools[functionName];
-    const functionArgs = JSON.parse(message.tool_calls[0].function.arguments);
-    const functionArgsArr = Object.values(functionArgs);
-    const functionResponse = await functionToCall.apply(null, functionArgsArr);
-
-    messages.push({
-      role: "function",
-      name: functionName,
-      content: `
-          The result of the last function was this: ${JSON.stringify(
-            functionResponse
-          )}
-          `,
-    });
-  } else if (finish_reason === "stop") {
-    messages.push(message);
-    return message.content;
-  }
-}
-return "The maximum number of iterations has been met without a suitable answer. Please try again with a more specific input.";
-```
-
-If we don't see a `finish_reason: "stop"` within our five iterations,
-we'll return a message saying we couldn’t find a suitable answer.
-
-## Running the final app
-
-At this point, we are ready to try our app! I'll ask the agent to
-suggest some activities based on my location and the current weather.
-
-```js
-const response = await agent(
-  "Please suggest some activities based on my location and the current weather."
-);
-console.log(response);
-```
-
-Here's what we see in the console (formatted to make it easier to read):
-
-```js
-Based on your current location in Oslo, Norway and the weather (15°C and snowy),
-here are some activity suggestions:
-
-1. A visit to the Oslo Winter Park for skiing or snowboarding.
-2. Enjoy a cosy day at a local café or restaurant.
-3. Visit one of Oslo's many museums. The Fram Museum or Viking Ship Museum offer interesting insights into Norway’s seafaring history.
-4. Take a stroll in the snowy streets and enjoy the beautiful winter landscape.
-5. Enjoy a nice book by the fireplace in a local library.
-6. Take a fjord sightseeing cruise to enjoy the snowy landscapes.
-
-Always remember to bundle up and stay warm. Enjoy your day!
-```
-
-If we peek under the hood, and log out `response.choices[0].message` in
-each iteration of the loop, we'll see that GPT has instructed us to use
-both our functions before coming up with an answer.
-
-First, it tells us to call the `getLocation` function. Then it tells us
-to call the `getCurrentWeather` function with
-`"longitude": "10.859", "latitude": "59.955"` passed in as the
-arguments. This is data it got back from the first function call we did.
-
-```js
-{"role":"assistant","content":null,"tool_calls":[{"id":"call_Cn1KH8mtHQ2AMbyNwNJTweEP","type":"function","function":{"name":"getLocation","arguments":"{}"}}]}
-{"role":"assistant","content":null,"tool_calls":[{"id":"call_uc1oozJfGTvYEfIzzcsfXfOl","type":"function","function":{"name":"getCurrentWeather","arguments":"{\n\"latitude\": \"10.859\",\n\"longitude\": \"59.955\"\n}"}}]}
-```
-
-You've now built an AI agent using OpenAI functions and the Node.js SDK! If you're looking for an extra challenge, consider enhancing this app. For example, you could add a function that fetches up-to-date information on events and activities in the user's location.
-
-Happy coding!
-
-<details>
-<summary>Complete code</summary>
-
-```js
-import OpenAI from "openai";
-
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY,
-  dangerouslyAllowBrowser: true,
-});
-
-async function getLocation() {
-  const response = await fetch("https://ipapi.co/json/");
-  const locationData = await response.json();
-  return locationData;
-}
-
-async function getCurrentWeather(latitude, longitude) {
-  const url = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&hourly=apparent_temperature`;
-  const response = await fetch(url);
-  const weatherData = await response.json();
-  return weatherData;
-}
-
-const tools = [
-  {
-    type: "function",
-    function: {
-      name: "getCurrentWeather",
-      description: "Get the current weather in a given location",
-      parameters: {
-        type: "object",
-        properties: {
-          latitude: {
-            type: "string",
-          },
-          longitude: {
-            type: "string",
-          },
-        },
-        required: ["longitude", "latitude"],
-      },
-    }
-  },
-  {
-    type: "function",
-    function: {
-      name: "getLocation",
-      description: "Get the user's location based on their IP address",
-      parameters: {
-        type: "object",
-        properties: {},
-      },
-    }
-  },
-];
-
-const availableTools = {
-  getCurrentWeather,
-  getLocation,
-};
-
-const messages = [
-  {
-    role: "system",
-    content: `You are a helpful assistant. Only use the functions you have been provided with.`,
-  },
-];
-
-async function agent(userInput) {
-  messages.push({
-    role: "user",
-    content: userInput,
-  });
-
-  for (let i = 0; i < 5; i++) {
-    const response = await openai.chat.completions.create({
-      model: "gpt-4",
-      messages: messages,
-      tools: tools,
-    });
-
-    const { finish_reason, message } = response.choices[0];
-
-    if (finish_reason === "tool_calls" && message.tool_calls) {
-      const functionName = message.tool_calls[0].function.name;
-      const functionToCall = availableTools[functionName];
-      const functionArgs = JSON.parse(message.tool_calls[0].function.arguments);
-      const functionArgsArr = Object.values(functionArgs);
-      const functionResponse = await functionToCall.apply(
-        null,
-        functionArgsArr
-      );
-
-      messages.push({
-        role: "function",
-        name: functionName,
-        content: `
-                The result of the last function was this: ${JSON.stringify(
-                  functionResponse
-                )}
-                `,
-      });
-    } else if (finish_reason === "stop") {
-      messages.push(message);
-      return message.content;
-    }
-  }
-  return "The maximum number of iterations has been met without a suitable answer. Please try again with a more specific input.";
-}
-
-const response = await agent(
-  "Please suggest some activities based on my location and the weather."
-);
-
-console.log("response:", response);
-```
-
-</details>
diff --git a/openai-cookbook_md_files/vector_databases/supabase/semantic-search.mdx b/openai-cookbook_md_files/vector_databases/supabase/semantic-search.mdx
deleted file mode 100644
index 77bb61f23d0a82bf11cb7d4075c516e5ed17c102..0000000000000000000000000000000000000000
--- a/openai-cookbook_md_files/vector_databases/supabase/semantic-search.mdx
+++ /dev/null
@@ -1,276 +0,0 @@
-# Semantic search using Supabase Vector
-
-The purpose of this guide is to demonstrate how to store OpenAI embeddings in [Supabase Vector](https://supabase.com/docs/guides/ai) (Postgres + pgvector) for the purposes of semantic search.
-
-[Supabase](https://supabase.com/docs) is an open-source Firebase alternative built on top of [Postgres](https://en.wikipedia.org/wiki/PostgreSQL), a production-grade SQL database. Since Supabase Vector is built on [pgvector](https://github.com/pgvector/pgvector), you can store your embeddings within the same database that holds the rest of your application data. When combined with pgvector's indexing algorithms, vector search remains [fast at large scales](https://supabase.com/blog/increase-performance-pgvector-hnsw).
-
-Supabase adds an ecosystem of services and tools to make app development as quick as possible (such as an [auto-generated REST API](https://postgrest.org/)). We'll use these services to store and query embeddings within Postgres.
-
-This guide covers:
-
-1. [Setting up your database](#setup-database)
-2. [Creating a SQL table](#create-a-vector-table) that can store vector data
-3. [Generating OpenAI embeddings](#generate-openai-embeddings) using OpenAI's JavaScript client
-4. [Storing the embeddings](#store-embeddings-in-database) in your SQL table using the Supabase JavaScript client
-5. [Performing semantic search](#semantic-search) over the embeddings using a Postgres function and the Supabase JavaScript client
-
-## Setup database
-
-First head over to https://database.new to provision your Supabase database. This will create a Postgres database on the Supabase cloud platform. Alternatively, you can follow the [local development](https://supabase.com/docs/guides/cli/getting-started) options if you prefer to run your database locally using Docker.
-
-In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and execute the following SQL to enable pgvector:
-
-```sql
--- Enable the pgvector extension
-create extension if not exists vector;
-```
-
-> In a production application, the best practice is to use [database migrations](https://supabase.com/docs/guides/cli/local-development#database-migrations) so that all SQL operations are managed within source control. To keep things simple in this guide, we'll execute queries directly in the SQL Editor. If you are building a production app, feel free to move these into a database migration.
-
-## Create a vector table
-
-Next we'll create a table to store documents and embeddings. In the SQL Editor, run:
-
-```sql
-create table documents (
-  id bigint primary key generated always as identity,
-  content text not null,
-  embedding vector (1536) not null
-);
-```
-
-Since Supabase is built on Postgres, we're just using regular SQL here. You can modify this table however you like to better fit your application. If you have existing database tables, you can simply add a new `vector` column to the appropriate table.
-
-The important piece to understand is the `vector` data type, which is a new data type that became available when we enabled the pgvector extension earlier. The size of the vector (1536 here) represents the number of dimensions in the embedding. Since we're using OpenAI's `text-embedding-3-small` model in this example, we set the vector size to 1536.
-
-Let's go ahead and create a vector index on this table so that future queries remain performant as the table grows:
-
-```sql
-create index on documents using hnsw (embedding vector_ip_ops);
-```
-
-This index uses the [HNSW](https://supabase.com/docs/guides/ai/vector-indexes/hnsw-indexes) algorithm to index vectors stored in the `embedding` column, and specifically when using the inner product operator (`<#>`). We'll explain more about this operator later when we implement our match function.
-
-Let's also follow security best practices by enabling row level security on the table:
-
-```sql
-alter table documents enable row level security;
-```
-
-This will prevent unauthorized access to this table through the auto-generated REST API (more on this shortly).
-
-## Generate OpenAI embeddings
-
-This guide uses JavaScript to generate embeddings, but you can easily modify it to use any [language supported by OpenAI](https://platform.openai.com/docs/libraries).
-
-If you are using JavaScript, feel free to use whichever server-side JavaScript runtime that you prefer (Node.js, Deno, Supabase Edge Functions).
-
-If you're using Node.js, first install `openai` as a dependency:
-
-```shell
-npm install openai
-```
-
-then import it:
-
-```js
-import OpenAI from "openai";
-```
-
-If you're using Deno or Supabase Edge Functions, you can import `openai` directly from a URL:
-
-```js
-import OpenAI from "https://esm.sh/openai@4";
-```
-
-> In this example we import from https://esm.sh which is a CDN that automatically fetches the respective NPM module for you and serves it over HTTP.
-
-Next we'll generate an OpenAI embedding using [`text-embedding-3-small`](https://platform.openai.com/docs/guides/embeddings/embedding-models):
-
-```js
-const openai = new OpenAI();
-
-const input = "The cat chases the mouse";
-
-const result = await openai.embeddings.create({
-  input,
-  model: "text-embedding-3-small",
-});
-
-const [{ embedding }] = result.data;
-```
-
-Remember that you will need an [OpenAI API key](https://platform.openai.com/api-keys) to interact with the OpenAI API. You can pass this as an environment variable called `OPENAI_API_KEY`, or manually set it when you instantiate your OpenAI client:
-
-```js
-const openai = new OpenAI({
-  apiKey: "<openai-api-key>",
-});
-```
-
-_**Remember:** Never hard-code API keys in your code. Best practice is to either store it in a `.env` file and load it using a library like [`dotenv`](https://github.com/motdotla/dotenv) or load it from an external key management system._
-
-## Store embeddings in database
-
-Supabase comes with an [auto-generated REST API](https://postgrest.org/) that dynamically builds REST endpoints for each of your tables. This means you don't need to establish a direct Postgres connection to your database - instead you can interact with it simply by using the REST API. This is especially useful in serverless environments that run short-lived processes where re-establishing a database connection every time can be expensive.
-
-Supabase comes with a number of [client libraries](https://supabase.com/docs#client-libraries) to simplify interaction with the REST API. In this guide we'll use the [JavaScript client library](https://supabase.com/docs/reference/javascript), but feel free to adjust this to your preferred language.
-
-If you're using Node.js, install `@supabase/supabase-js` as a dependency:
-
-```shell
-npm install @supabase/supabase-js
-```
-
-then import it:
-
-```js
-import { createClient } from "@supabase/supabase-js";
-```
-
-If you're using Deno or Supabase Edge Functions, you can import `@supabase/supabase-js` directly from a URL:
-
-```js
-import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
-```
-
-Next we'll instantiate our Supabase client and configure it so that it points to your Supabase project. In this guide we'll store a reference to your Supabase URL and key in a `.env` file, but feel free to modify this based on how your application handles configuration.
-
-If you are using Node.js or Deno, add your Supabase URL and service role key to a `.env` file. If you are using the Supabase cloud platform, you can find these on your project's [settings page](https://supabase.com/dashboard/project/_/settings/api). If you're running Supabase locally, you can find them by running `npx supabase status` in a terminal.
-
-_.env_
-
-```
-SUPABASE_URL=<supabase-url>
-SUPABASE_SERVICE_ROLE_KEY=<supabase-service-role-key>
-```
-
-If you are using Supabase Edge Functions, these environment variables are automatically injected into your function for you, so you can skip the above step.
-
-Next we'll pull these environment variables into our app.
-
-In Node.js, install the `dotenv` dependency:
-
-```shell
-npm install dotenv
-```
-
-And retrieve the environment variables from `process.env`:
-
-```js
-import { config } from "dotenv";
-
-// Load .env file
-config();
-
-const supabaseUrl = process.env["SUPABASE_URL"];
-const supabaseServiceRoleKey = process.env["SUPABASE_SERVICE_ROLE_KEY"];
-```
-
-In Deno, load the `.env` file using the `dotenv` standard library:
-
-```js
-import { load } from "https://deno.land/std@0.208.0/dotenv/mod.ts";
-
-// Load .env file
-const env = await load();
-
-const supabaseUrl = env["SUPABASE_URL"];
-const supabaseServiceRoleKey = env["SUPABASE_SERVICE_ROLE_KEY"];
-```
-
-In Supabase Edge Functions, read the injected environment variables directly:
-
-```js
-const supabaseUrl = Deno.env.get("SUPABASE_URL");
-const supabaseServiceRoleKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY");
-```
-
-Next let's instantiate our `supabase` client:
-
-```js
-const supabase = createClient(supabaseUrl, supabaseServiceRoleKey, {
-  auth: { persistSession: false },
-});
-```
-
-From here we use the `supabase` client to insert our text and embedding (generated earlier) into the database:
-
-```js
-const { error } = await supabase.from("documents").insert({
-  content: input,
-  embedding,
-});
-```
-
-> In production, best practice would be to check the response `error` to see if there were any problems inserting the data and handle them accordingly.
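-
-As a rough sketch, that check might look like this:
-
-```js
-if (error) {
-  // Handle the failed insert however makes sense for your app,
-  // e.g. log it and surface the error to the caller
-  console.error("Failed to insert document:", error.message);
-  throw error;
-}
-```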
-
-## Semantic search
-
-Finally let's perform semantic search over the embeddings in our database. At this point we'll assume your `documents` table has been filled with multiple records that we can search over.
-
-Let's create a match function in Postgres that performs the semantic search query. Execute the following in the [SQL Editor](https://supabase.com/dashboard/project/_/sql/new):
-
-```sql
-create function match_documents (
-  query_embedding vector (1536),
-  match_threshold float
-)
-returns setof documents
-language plpgsql
-as $$
-begin
-  return query
-  select *
-  from documents
-  where documents.embedding <#> query_embedding < -match_threshold
-  order by documents.embedding <#> query_embedding;
-end;
-$$;
-```
-
-This function accepts a `query_embedding`, which represents the embedding generated from the search query text (more on this shortly). It also accepts a `match_threshold`, which specifies how similar a document's embedding has to be to the `query_embedding` in order to count as a match.
-
-Inside the function, we implement a query that does two things:
-
-- Filters the documents to only include those whose embeddings meet the `match_threshold`. Since the `<#>` operator performs the negative inner product (as opposed to the positive inner product), we negate the similarity threshold before comparing. This means a `match_threshold` of 1 is most similar and -1 is most dissimilar.
-- Orders the documents by negative inner product (`<#>`) ascending, so the closest matches are returned first.
-
-> Since OpenAI embeddings are normalized, we opted to use inner product (`<#>`) because it is slightly more performant than other operators like cosine distance (`<=>`). It's important to note, though, that this only works because the embeddings are normalized; if they weren't, cosine distance should be used instead.
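-
-For reference, a cosine-distance variant of the match function (a sketch for the non-normalized case - the function name here is just illustrative, and you don't need this for OpenAI embeddings) could look like this:
-
-```sql
-create function match_documents_cosine (
-  query_embedding vector (1536),
-  match_threshold float
-)
-returns setof documents
-language plpgsql
-as $$
-begin
-  return query
-  select *
-  from documents
-  -- <=> returns cosine distance, so similarity = 1 - distance
-  where 1 - (documents.embedding <=> query_embedding) > match_threshold
-  order by documents.embedding <=> query_embedding;
-end;
-$$;
-```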
-
-Now we can call this function from our application using the `supabase.rpc()` method:
-
-```js
-const query = "What does the cat chase?";
-
-// First create an embedding on the query itself
-const result = await openai.embeddings.create({
-  input: query,
-  model: "text-embedding-3-small",
-});
-
-const [{ embedding }] = result.data;
-
-// Then use this embedding to search for matches
-const { data: documents, error: matchError } = await supabase
-  .rpc("match_documents", {
-    query_embedding: embedding,
-    match_threshold: 0.8,
-  })
-  .select("content")
-  .limit(5);
-```
-
-In this example, we set the match threshold to 0.8. Adjust this threshold based on what works best with your data.
-
-Note that since `match_documents` returns a set of `documents`, we can treat this `rpc()` like a regular table query. Specifically, this means we can chain additional commands onto this query, like `select()` and `limit()`. Here we select just the column we care about from the `documents` table (`content`), and we limit the number of documents returned (a maximum of 5 in this example).
-
-At this point you have a list of documents that matched the query based on semantic relationship, ordered by most similar first.
-
-## Next steps
-
-You can use this example as the foundation for other semantic search techniques, like retrieval augmented generation (RAG).
-
-For more information on OpenAI embeddings, read the [Embedding](https://platform.openai.com/docs/guides/embeddings) docs.
-
-For more information on Supabase Vector, read the [AI & Vector](https://supabase.com/docs/guides/ai) docs.