Upload folder using huggingface_hub

#2
by omarsol - opened
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
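The commit title above is the default message `huggingface_hub` writes when a whole local folder is pushed in one commit. A minimal sketch of how such a commit is typically produced (the repo id and `repo_type` below are assumptions; the target repo is not named in this view):

```python
from huggingface_hub import HfApi

# Sketch: push a local folder of vector stores as a single commit.
# "user/ai-tutor-vector-db" and repo_type="dataset" are guesses,
# not taken from this diff.
api = HfApi()
api.upload_folder(
    folder_path="./vector-stores",  # local folder holding the chroma-db-* dirs
    repo_id="user/ai-tutor-vector-db",
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)
```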
Files changed (50)
  1. .DS_Store +0 -0
  2. chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/data_level0.bin +0 -3
  3. chroma-db-all_sources/document_dict_all_sources.pkl +0 -3
  4. chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/data_level0.bin +3 -0
  5. {chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/header.bin +1 -1
  6. {chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/index_metadata.pickle +2 -2
  7. {chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/length.bin +2 -2
  8. {chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/link_lists.bin +2 -2
  9. {chroma-db-all_sources → chroma-db-langchain}/chroma.sqlite3 +2 -2
  10. chroma-db-langchain/document_dict_langchain.pkl +3 -0
  11. chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/data_level0.bin +3 -0
  12. chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/header.bin +3 -0
  13. chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/index_metadata.pickle +3 -0
  14. chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/length.bin +3 -0
  15. chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/link_lists.bin +3 -0
  16. all_sources_contextual_nodes.pkl → chroma-db-llama_index/chroma.sqlite3 +2 -2
  17. chroma-db-llama_index/document_dict_llama_index.pkl +3 -0
  18. chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/data_level0.bin +3 -0
  19. chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/header.bin +3 -0
  20. chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/index_metadata.pickle +3 -0
  21. chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/length.bin +3 -0
  22. chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/link_lists.bin +3 -0
  23. chroma-db-openai_cookbooks/chroma.sqlite3 +3 -0
  24. chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl +3 -0
  25. chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/data_level0.bin +3 -0
  26. chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/header.bin +3 -0
  27. chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/length.bin +3 -0
  28. chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/link_lists.bin +0 -0
  29. chroma-db-peft/chroma.sqlite3 +3 -0
  30. chroma-db-peft/document_dict_peft.pkl +3 -0
  31. chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/data_level0.bin +3 -0
  32. chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/header.bin +3 -0
  33. chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/index_metadata.pickle +3 -0
  34. chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/length.bin +3 -0
  35. chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/link_lists.bin +3 -0
  36. chroma-db-transformers/chroma.sqlite3 +3 -0
  37. chroma-db-transformers/document_dict_transformers.pkl +3 -0
  38. chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/data_level0.bin +3 -0
  39. chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/header.bin +3 -0
  40. chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/length.bin +3 -0
  41. chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/link_lists.bin +0 -0
  42. chroma-db-trl/chroma.sqlite3 +3 -0
  43. chroma-db-trl/document_dict_trl.pkl +3 -0
  44. langchain_md_files/_templates/integration.mdx +0 -60
  45. langchain_md_files/additional_resources/arxiv_references.mdx +0 -1101
  46. langchain_md_files/additional_resources/dependents.mdx +0 -554
  47. langchain_md_files/additional_resources/tutorials.mdx +0 -52
  48. langchain_md_files/additional_resources/youtube.mdx +0 -63
  49. langchain_md_files/changes/changelog/core.mdx +0 -10
  50. langchain_md_files/changes/changelog/langchain.mdx +0 -93
.DS_Store DELETED
Binary file (6.15 kB)
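Every other binary in this commit is tracked with Git LFS, so the diffs below show only the three-line pointer stub (spec version, SHA-256 of the payload, payload size in bytes) rather than the binary itself. A small sketch of reading those fields, using a hypothetical helper name:

```python
# Sketch: parse a Git LFS pointer stub into its three fields.
# parse_lfs_pointer is a hypothetical helper, not part of this repo.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],                  # LFS spec URL
        "oid": fields["oid"].removeprefix("sha256:"),  # SHA-256 of the real file
        "size": int(fields["size"]),                   # payload size in bytes
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e8b876ac2e179211d41424ec19f40153fa118545766dee59f9753a73b04f350f
size 135552000"""
print(parse_lfs_pointer(pointer))  # the deleted ~135 MB data_level0.bin below
```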
 
chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc/data_level0.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e8b876ac2e179211d41424ec19f40153fa118545766dee59f9753a73b04f350f
- size 135552000
chroma-db-all_sources/document_dict_all_sources.pkl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4bee37cedea3099ac8b65a74904688d771f624c97097f0c67e65b163b3967b22
- size 87955987
chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7b31772d7b492860c7d8a5bf5009e837e4210f36db02b577d200213ec74a1c6
+ size 74568000
{chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/header.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:595a4f1b655e01205b66b5f692d44a48da44acf9a2ad5155a223d082d235bae3
+ oid sha256:cb0d9006c0a810bed3cf70ce96081931f4ca52fba11d05376a99d4e432d9d994
  size 100
{chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/index_metadata.pickle RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef4d9945fed0d6ed7a0b8a619af49e5c8524c1a61e2a61c5dfd554e6af9ffb3d
- size 1854390
+ oid sha256:43b1ec3c7d4b11231e551e43c43dc6f8c6cbf3221517f7ed1e54afd70f6e08a0
+ size 346117
{chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/length.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6763f97c8f35167fe68a1f7b8b22a14a3e148614522e9df6ad82ac65c24c4cb0
- size 128000
+ oid sha256:5b30b7d36428adb2def6746197d2a25c90b0dc6c7e0bcfd6216bfdc81dc6ad98
+ size 24000
{chroma-db-all_sources/c50946fb-91db-4b3a-81a8-507f4e24e0fc → chroma-db-langchain/a991ffa1-6102-416d-a561-877198e9f5de}/link_lists.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fdfa1a217488e83bd57b7d13985ce0f4379f577eec289316b442b3ebe7dbb79f
- size 277872
+ oid sha256:83cab85fce66e7f40c7b93609e7b34d9970f8dd7fb0ec8ed3ca9691f7d515b84
+ size 52220
{chroma-db-all_sources → chroma-db-langchain}/chroma.sqlite3 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f8bc923548681b7c546e6e45d6dfd796876b67066118cf3fe6b22cfeca1524b2
- size 947904512
+ oid sha256:1fb702f4ed770cf0f0630d4d9c999de16409e95f0708cc6d4bc41f9b6758e0c0
+ size 223997952
chroma-db-langchain/document_dict_langchain.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9288ead475c396868d4046b709ba6b4704b469dc10d571d61e2ac4a651dc8360
+ size 9495017
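With the langchain files in place, each `chroma-db-<source>` directory is a self-contained persisted Chroma store: `chroma.sqlite3` holds the collection metadata, the UUID-named folder holds the HNSW index segment (`data_level0.bin`, `header.bin`, `length.bin`, `link_lists.bin`, plus `index_metadata.pickle`), and `document_dict_<source>.pkl` carries the raw source documents (its exact keying is not shown in this diff). A hedged sketch of opening one locally, assuming chromadb's `PersistentClient` API and guessing the collection name:

```python
import pickle
import chromadb

# Sketch: open the persisted langchain store after downloading the repo.
# The collection name "langchain" is a guess; list_collections() shows
# the real one.
client = chromadb.PersistentClient(path="chroma-db-langchain")
print(client.list_collections())
collection = client.get_collection("langchain")  # hypothetical name
print(collection.count())

# Companion pickle with the full documents backing the vectors.
with open("chroma-db-langchain/document_dict_langchain.pkl", "rb") as f:
    document_dict = pickle.load(f)
print(len(document_dict))
```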
chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff775114aa9ea2874506dc5fb42fb0cb40c8aba1d39a5ccc40c0d3e01fc617fe
+ size 74568000
chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6485506af204d2b936b1f28bc63bcc7b791d4b431bc168bdfef9290d9059fe73
+ size 100
chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e4a0ed52b4a277d65769ca116592388b62dc31871eabdb3504d84c656914321
+ size 346117
chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fec5cf06bc6bc8d7e43df5ec03e41faaf11b25a94375e31500d22aad8d9b19b3
+ size 24000
chroma-db-llama_index/c7e869e3-1822-4dde-8d40-a8f631ba43f7/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e9702fd3fab5b9e0a9b3495b9051dcbec394bf49a94c84503c00f2c59468e2c
+ size 52152
all_sources_contextual_nodes.pkl → chroma-db-llama_index/chroma.sqlite3 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a3fb01a8d69f1af5b8c9d863a0a40d109f812696a43a6ce2a3b420458be4bc49
- size 112785806
+ oid sha256:2f9f841426404c5901ac2f13ffc1c7224cb2783cf4e0276ffbc5783f1426cb29
+ size 205246464
chroma-db-llama_index/document_dict_llama_index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5153123cbc6d2e83c1d6b60d23dd5afee00bd9a4967143b9e8f30f1792c5e932
+ size 8954720
chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93df5543a015938eddd1f7c3cf53c35de01709be02c54836a47a1b445a39941c
+ size 24856000
chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5adccd0a9e9b2c539168b73e6cc6ce867211ec92bdfbe077126f0620285ad69d
+ size 100
chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dbbac2bbab35c9a2282f4d5edba22b4da61d44a03cd94a6dfed3a957ec84603
+ size 114057
chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:448e2b40f4fab352a4c0c747a4c13d28a753e48374f6f96c9cfbd8f153ea30f9
+ size 8000
chroma-db-openai_cookbooks/0b25dfdf-6d35-44aa-92ea-ba471d44a52c/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eea6193ad5b15addc24b2bb8a6381d88976c55d7e8729fc78db5ab9909f782c8
+ size 17316
chroma-db-openai_cookbooks/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f306cdfca7f7d2899118eb12fb1fc1b7393ebf80f70e358340fe9d6ea87e33e0
+ size 83746816
chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3c410a0972b9c2dfa286ad03449d8024704fbcb0b313f126ae10dd1d7b94f21
+ size 3490619
chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b8d4b3825a7c7a773e22fa3eeef0e7d15a695f5c4183aeff5beb07741a68679
+ size 12428000
chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8a3ec48846fc6fdfaef19f5ed2508f0bf3da4a3c93b0f6b3dd21f0a22ec1026
+ size 100
chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c65d4f4f981c64a2613d4b82d32fcf22dca9ebfa2cfaffdd4e12e54e890a1d1
+ size 4000
chroma-db-peft/7f6a74f1-af06-461d-8abb-2b1728f320f7/link_lists.bin ADDED
File without changes
chroma-db-peft/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b0321f854c294da9564e7e90ccb11b3190bd3d900d4606250fb1ccbaabd83be
+ size 5226496
chroma-db-peft/document_dict_peft.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69ea3f661fbc9d85496d6cf77a09cb545998b1f0ebe4a8fb91865444dbfcffae
+ size 260392
chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71dacc58c9b86fda98eba379dabcb91f62ed3a10d381647faa10d0e43889ff4f
+ size 12428000
chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db3337c9290bd8362d7849233bb2ce47b0b5a48d1790b5db251bd3ecb56a8fd4
+ size 100
chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f2ff58c1cdba77e74cbd707994a2cceaddcb890e9f11ae38a6b1fae30af5e4e
+ size 56042
chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc19b1997119425765295aeab72d76faa6927d4f83985d328c26f20468d6cc76
+ size 4000
chroma-db-transformers/72747caf-b9b0-48d5-8712-4cf07905d824/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8508e40fb725d5c517c803a091d55067abf0479de7fb605d36cdcfaa454a4eb
+ size 8148
chroma-db-transformers/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53ece68c3b8a7c87f4b630e542127601194dcc4d97ac9d2a236938b575e33ae6
+ size 63442944
chroma-db-transformers/document_dict_transformers.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0eb21c73e0cf7ef0970b615c66df67dae4e973befea9fdd22721dd69b0939231
+ size 3166114
chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b8d4b3825a7c7a773e22fa3eeef0e7d15a695f5c4183aeff5beb07741a68679
+ size 12428000
chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8a3ec48846fc6fdfaef19f5ed2508f0bf3da4a3c93b0f6b3dd21f0a22ec1026
+ size 100
chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b8d924c7d1367cea6fd3c8fa8df0be395d4f62bf898bf07df588aa3140d7b61
+ size 4000
chroma-db-trl/65e2350c-2bd4-46a8-b379-4c6561901fe1/link_lists.bin ADDED
File without changes
chroma-db-trl/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:066e040cdf71203d2d2f90a870854e9fd16418d5dc1229fccf52d2887cec1c5c
+ size 5292032
chroma-db-trl/document_dict_trl.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea2cf47511ef464f04c458d950cae2ac158a10cca40a338bacb9e38351223375
+ size 264000
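Taken together, these additions replace the single chroma-db-all_sources store with one persisted store per source (langchain, llama_index, openai_cookbooks, peft, transformers, trl). A sketch of fanning a query out across all of them, under the same assumptions as the loading sketch above:

```python
import chromadb

# Sketch: query each per-source store added in this commit.
# Directory names come from the diff; collection names are still guesses,
# and query_texts embeds with whatever embedding function the collection
# was configured with at build time.
SOURCES = ["langchain", "llama_index", "openai_cookbooks",
           "peft", "transformers", "trl"]

def query_all_sources(text: str, k: int = 3) -> dict:
    hits = {}
    for source in SOURCES:
        client = chromadb.PersistentClient(path=f"chroma-db-{source}")
        collection = client.get_collection(source)  # hypothetical name
        hits[source] = collection.query(query_texts=[text], n_results=k)
    return hits
```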
langchain_md_files/_templates/integration.mdx DELETED
@@ -1,60 +0,0 @@
- [comment: Please, a reference example here "docs/integrations/arxiv.md"]::
- [comment: Use this template to create a new .md file in "docs/integrations/"]::
-
- # Title_REPLACE_ME
-
- [comment: Only one Tile/H1 is allowed!]::
-
- >
- [comment: Description: After reading this description, a reader should decide if this integration is good enough to try/follow reading OR]::
- [comment: go to read the next integration doc. ]::
- [comment: Description should include a link to the source for follow reading.]::
-
- ## Installation and Setup
-
- [comment: Installation and Setup: All necessary additional package installations and setups for Tokens, etc]::
-
- ```bash
- pip install package_name_REPLACE_ME
- ```
-
- [comment: OR this text:]::
-
- There isn't any special setup for it.
-
- [comment: The next H2/## sections with names of the integration modules, like "LLM", "Text Embedding Models", etc]::
- [comment: see "Modules" in the "index.html" page]::
- [comment: Each H2 section should include a link to an example(s) and a Python code with the import of the integration class]::
- [comment: Below are several example sections. Remove all unnecessary sections. Add all necessary sections not provided here.]::
-
- ## LLM
-
- See a [usage example](/docs/integrations/llms/INCLUDE_REAL_NAME).
-
- ```python
- from langchain_community.llms import integration_class_REPLACE_ME
- ```
-
- ## Text Embedding Models
-
- See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME).
-
- ```python
- from langchain_community.embeddings import integration_class_REPLACE_ME
- ```
-
- ## Chat models
-
- See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME).
-
- ```python
- from langchain_community.chat_models import integration_class_REPLACE_ME
- ```
-
- ## Document Loader
-
- See a [usage example](/docs/integrations/document_loaders/INCLUDE_REAL_NAME).
-
- ```python
- from langchain_community.document_loaders import integration_class_REPLACE_ME
- ```
langchain_md_files/additional_resources/arxiv_references.mdx DELETED
@@ -1,1101 +0,0 @@
1
- # arXiv
2
-
3
- LangChain implements the latest research in the field of Natural Language Processing.
4
- This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
5
- Templates, and Cookbooks.
6
-
7
- From the opposite direction, scientists use `LangChain` in research and reference it in the research papers.
8
-
9
- `arXiv` papers with references to:
10
- [LangChain](https://arxiv.org/search/?query=langchain&searchtype=all&source=header) | [LangGraph](https://arxiv.org/search/?query=langgraph&searchtype=all&source=header) | [LangSmith](https://arxiv.org/search/?query=langsmith&searchtype=all&source=header)
11
-
12
- ## Summary
13
-
14
- | arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
15
- |------------------|---------|-------------------|------------------------|
16
- | `2403.14403v2` [Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity](http://arxiv.org/abs/2403.14403v2) | Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al. | 2024‑03‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
17
- | `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024‑02‑06 | `Cookbook:` [Self-Discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
18
- | `2402.03367v2` [RAG-Fusion: a New Take on Retrieval-Augmented Generation](http://arxiv.org/abs/2402.03367v2) | Zackary Rackauckas | 2024‑01‑31 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
19
- | `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024‑01‑31 | `Cookbook:` [Raptor](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
20
- | `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024‑01‑29 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
21
- | `2401.08500v1` [Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering](http://arxiv.org/abs/2401.08500v1) | Tal Ridnik, Dedy Kredo, Itamar Friedman | 2024‑01‑16 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
22
- | `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024‑01‑08 | `Cookbook:` [Together Ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
23
- | `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023‑12‑11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
24
- | `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023‑11‑15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
25
- | `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023‑10‑17 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Self Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
26
- | `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023‑10‑09 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [Stepback-Qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
27
- | `2307.15337v3` [Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation](http://arxiv.org/abs/2307.15337v3) | Xuefei Ning, Zinan Lin, Zixuan Zhou, et al. | 2023‑07‑28 | `Template:` [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
28
- | `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023‑07‑18 | `Cookbook:` [Semi Structured Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
29
- | `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023‑07‑06 | `Docs:` [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
30
- | `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023‑05‑23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [Rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
31
- | `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023‑05‑15 | `API:` [langchain_experimental.tot](https://python.langchain.com/api_reference/experimental/tot.html), `Cookbook:` [Tree Of Thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
32
- | `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023‑05‑06 | `Cookbook:` [Plan And Execute Agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
33
- | `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al. | 2023‑05‑03 | `Docs:` [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression), `API:` [langchain...LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#)
34
- | `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023‑04‑17 | `Cookbook:` [Semi Structured Multi Modal Rag Llama2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi Structured And Multi Modal Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
35
- | `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023‑04‑07 | `Cookbook:` [Generative Agents Interactive Simulacra Of Human Behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [Multiagent Bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
36
- | `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023‑03‑31 | `Cookbook:` [Camel Role Playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
37
- | `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023‑03‑30 | `API:` [langchain_experimental.autonomous_agents](https://python.langchain.com/api_reference/experimental/autonomous_agents.html), `Cookbook:` [Hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
38
- | `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023‑01‑24 | `API:` [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
39
- | `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022‑12‑20 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [Hypothetical Document Embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
40
- | `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022‑12‑15 | `Docs:` [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
41
- | `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022‑12‑12 | `API:` [langchain_experimental.fallacy_removal](https://python.langchain.com/api_reference/experimental/fallacy_removal.html)
42
- | `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022‑11‑25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
43
- | `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022‑11‑18 | `API:` [langchain_experimental.pal_chain](https://python.langchain.com/api_reference/experimental/pal_chain.html), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [Program Aided Language Model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
44
- | `2210.11934v2` [An Analysis of Fusion Functions for Hybrid Retrieval](http://arxiv.org/abs/2210.11934v2) | Sebastian Bruch, Siyu Gai, Amir Ingber | 2022‑10‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
45
- | `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022‑10‑06 | `Docs:` [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
46
- | `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022‑09‑22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
47
- | `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al. | 2022‑05‑26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
48
- | `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022‑05‑25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
49
- | `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022‑03‑15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
50
- | `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022‑02‑01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
51
- | `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021‑12‑02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
52
- | `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021‑02‑26 | `API:` [langchain_experimental.open_clip](https://python.langchain.com/api_reference/experimental/open_clip.html)
53
- | `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder, et al. | 2020‑05‑28 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
54
- | `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. | 2020‑05‑22 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
55
- | `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019‑09‑11 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
56
-
57
- ## Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity
58
-
59
- - **Authors:** Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al.
60
- - **arXiv id:** [2403.14403v2](http://arxiv.org/abs/2403.14403v2) **Published Date:** 2024-03-21
61
- - **LangChain:**
62
-
63
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
64
-
65
- **Abstract:** Retrieval-Augmented Large Language Models (LLMs), which incorporate the
66
- non-parametric knowledge from external knowledge bases into LLMs, have emerged
67
- as a promising approach to enhancing response accuracy in several tasks, such
68
- as Question-Answering (QA). However, even though there are various approaches
69
- dealing with queries of different complexities, they either handle simple
70
- queries with unnecessary computational overhead or fail to adequately address
71
- complex multi-step queries; yet, not all user requests fall into only one of
72
- the simple or complex categories. In this work, we propose a novel adaptive QA
73
- framework, that can dynamically select the most suitable strategy for
74
- (retrieval-augmented) LLMs from the simplest to the most sophisticated ones
75
- based on the query complexity. Also, this selection process is operationalized
76
- with a classifier, which is a smaller LM trained to predict the complexity
77
- level of incoming queries with automatically collected labels, obtained from
78
- actual predicted outcomes of models and inherent inductive biases in datasets.
79
- This approach offers a balanced strategy, seamlessly adapting between the
80
- iterative and single-step retrieval-augmented LLMs, as well as the no-retrieval
81
- methods, in response to a range of query complexities. We validate our model on
82
- a set of open-domain QA datasets, covering multiple query complexities, and
83
- show that ours enhances the overall efficiency and accuracy of QA systems,
84
- compared to relevant baselines including the adaptive retrieval approaches.
85
- Code is available at: https://github.com/starsuzi/Adaptive-RAG.
86
-
87
- ## Self-Discover: Large Language Models Self-Compose Reasoning Structures
88
-
89
- - **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.
90
- - **arXiv id:** [2402.03620v1](http://arxiv.org/abs/2402.03620v1) **Published Date:** 2024-02-06
91
- - **LangChain:**
92
-
93
- - **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
94
-
95
- **Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the
96
- task-intrinsic reasoning structures to tackle complex reasoning problems that
97
- are challenging for typical prompting methods. Core to the framework is a
98
- self-discovery process where LLMs select multiple atomic reasoning modules such
99
- as critical thinking and step-by-step thinking, and compose them into an
100
- explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER
101
- substantially improves GPT-4 and PaLM 2's performance on challenging reasoning
102
- benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as
103
- much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER
104
- outperforms inference-intensive methods such as CoT-Self-Consistency by more
105
- than 20%, while requiring 10-40x fewer inference compute. Finally, we show that
106
- the self-discovered reasoning structures are universally applicable across
107
- model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share
108
- commonalities with human reasoning patterns.
109
-
110
- ## RAG-Fusion: a New Take on Retrieval-Augmented Generation
111
-
112
- - **Authors:** Zackary Rackauckas
113
- - **arXiv id:** [2402.03367v2](http://arxiv.org/abs/2402.03367v2) **Published Date:** 2024-01-31
114
- - **LangChain:**
115
-
116
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
117
-
118
- **Abstract:** Infineon has identified a need for engineers, account managers, and customers
119
- to rapidly obtain product information. This problem is traditionally addressed
120
- with retrieval-augmented generation (RAG) chatbots, but in this study, I
121
- evaluated the use of the newly popularized RAG-Fusion method. RAG-Fusion
122
- combines RAG and reciprocal rank fusion (RRF) by generating multiple queries,
123
- reranking them with reciprocal scores and fusing the documents and scores.
124
- Through manually evaluating answers on accuracy, relevance, and
125
- comprehensiveness, I found that RAG-Fusion was able to provide accurate and
126
- comprehensive answers due to the generated queries contextualizing the original
127
- query from various perspectives. However, some answers strayed off topic when
128
- the generated queries' relevance to the original query is insufficient. This
129
- research marks significant progress in artificial intelligence (AI) and natural
130
- language processing (NLP) applications and demonstrates transformations in a
131
- global and multi-industry context.
132
-
133
- ## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
134
-
135
- - **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.
136
- - **arXiv id:** [2401.18059v1](http://arxiv.org/abs/2401.18059v1) **Published Date:** 2024-01-31
137
- - **LangChain:**
138
-
139
- - **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
140
-
141
- **Abstract:** Retrieval-augmented language models can better adapt to changes in world
142
- state and incorporate long-tail knowledge. However, most existing methods
143
- retrieve only short contiguous chunks from a retrieval corpus, limiting
144
- holistic understanding of the overall document context. We introduce the novel
145
- approach of recursively embedding, clustering, and summarizing chunks of text,
146
- constructing a tree with differing levels of summarization from the bottom up.
147
- At inference time, our RAPTOR model retrieves from this tree, integrating
148
- information across lengthy documents at different levels of abstraction.
149
- Controlled experiments show that retrieval with recursive summaries offers
150
- significant improvements over traditional retrieval-augmented LMs on several
151
- tasks. On question-answering tasks that involve complex, multi-step reasoning,
152
- we show state-of-the-art results; for example, by coupling RAPTOR retrieval
153
- with the use of GPT-4, we can improve the best performance on the QuALITY
154
- benchmark by 20% in absolute accuracy.
155
-
156
- ## Corrective Retrieval Augmented Generation
157
-
158
- - **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.
159
- - **arXiv id:** [2401.15884v2](http://arxiv.org/abs/2401.15884v2) **Published Date:** 2024-01-29
160
- - **LangChain:**
161
-
162
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
163
- - **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
164
-
165
- **Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the
166
- accuracy of generated texts cannot be secured solely by the parametric
167
- knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a
168
- practicable complement to LLMs, it relies heavily on the relevance of retrieved
169
- documents, raising concerns about how the model behaves if retrieval goes
170
- wrong. To this end, we propose the Corrective Retrieval Augmented Generation
171
- (CRAG) to improve the robustness of generation. Specifically, a lightweight
172
- retrieval evaluator is designed to assess the overall quality of retrieved
173
- documents for a query, returning a confidence degree based on which different
174
- knowledge retrieval actions can be triggered. Since retrieval from static and
175
- limited corpora can only return sub-optimal documents, large-scale web searches
176
- are utilized as an extension for augmenting the retrieval results. Besides, a
177
- decompose-then-recompose algorithm is designed for retrieved documents to
178
- selectively focus on key information and filter out irrelevant information in
179
- them. CRAG is plug-and-play and can be seamlessly coupled with various
180
- RAG-based approaches. Experiments on four datasets covering short- and
181
- long-form generation tasks show that CRAG can significantly improve the
182
- performance of RAG-based approaches.
183
-
184
- ## Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering
185
-
186
- - **Authors:** Tal Ridnik, Dedy Kredo, Itamar Friedman
187
- - **arXiv id:** [2401.08500v1](http://arxiv.org/abs/2401.08500v1) **Published Date:** 2024-01-16
188
- - **LangChain:**
189
-
190
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
191
-
192
- **Abstract:** Code generation problems differ from common natural language problems - they
193
- require matching the exact syntax of the target language, identifying happy
194
- paths and edge cases, paying attention to numerous small details in the problem
195
- spec, and addressing other code-specific issues and requirements. Hence, many
196
- of the optimizations and tricks that have been successful in natural language
197
- generation may not be effective for code tasks. In this work, we propose a new
198
- approach to code generation by LLMs, which we call AlphaCodium - a test-based,
199
- multi-stage, code-oriented iterative flow, that improves the performances of
200
- LLMs on code problems. We tested AlphaCodium on a challenging code generation
201
- dataset called CodeContests, which includes competitive programming problems
202
- from platforms such as Codeforces. The proposed flow consistently and
203
- significantly improves results. On the validation set, for example, GPT-4
204
- accuracy (pass@5) increased from 19% with a single well-designed direct prompt
205
- to 44% with the AlphaCodium flow. Many of the principles and best practices
206
- acquired in this work, we believe, are broadly applicable to general code
207
- generation tasks. Full implementation is available at:
208
- https://github.com/Codium-ai/AlphaCodium
209
-
210
- ## Mixtral of Experts
211
-
212
- - **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.
213
- - **arXiv id:** [2401.04088v1](http://arxiv.org/abs/2401.04088v1) **Published Date:** 2024-01-08
214
- - **LangChain:**
215
-
216
- - **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
217
-
218
- **Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model.
219
- Mixtral has the same architecture as Mistral 7B, with the difference that each
220
- layer is composed of 8 feedforward blocks (i.e. experts). For every token, at
221
- each layer, a router network selects two experts to process the current state
222
- and combine their outputs. Even though each token only sees two experts, the
223
- selected experts can be different at each timestep. As a result, each token has
224
- access to 47B parameters, but only uses 13B active parameters during inference.
225
- Mixtral was trained with a context size of 32k tokens and it outperforms or
226
- matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular,
227
- Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and
228
- multilingual benchmarks. We also provide a model fine-tuned to follow
229
- instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo,
230
- Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both
231
- the base and instruct models are released under the Apache 2.0 license.
232
-
233
- ## Dense X Retrieval: What Retrieval Granularity Should We Use?
234
-
235
- - **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
236
- - **arXiv id:** [2312.06648v2](http://arxiv.org/abs/2312.06648v2) **Published Date:** 2023-12-11
237
- - **LangChain:**
238
-
239
- - **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
240
-
241
- **Abstract:** Dense retrieval has become a prominent method to obtain relevant context or
242
- world knowledge in open-domain NLP tasks. When we use a learned dense retriever
243
- on a retrieval corpus at inference time, an often-overlooked design choice is
244
- the retrieval unit in which the corpus is indexed, e.g. document, passage, or
245
- sentence. We discover that the retrieval unit choice significantly impacts the
246
- performance of both retrieval and downstream tasks. Distinct from the typical
247
- approach of using passages or sentences, we introduce a novel retrieval unit,
248
- proposition, for dense retrieval. Propositions are defined as atomic
249
- expressions within text, each encapsulating a distinct factoid and presented in
250
- a concise, self-contained natural language format. We conduct an empirical
251
- comparison of different retrieval granularity. Our results reveal that
252
- proposition-based retrieval significantly outperforms traditional passage or
253
- sentence-based methods in dense retrieval. Moreover, retrieval by proposition
254
- also enhances the performance of downstream QA tasks, since the retrieved texts
255
- are more condensed with question-relevant information, reducing the need for
256
- lengthy input tokens and minimizing the inclusion of extraneous, irrelevant
257
- information.
258
-
259
- ## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
260
-
261
- - **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
262
- - **arXiv id:** [2311.09210v1](http://arxiv.org/abs/2311.09210v1) **Published Date:** 2023-11-15
263
- - **LangChain:**
264
-
265
- - **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
266
-
267
- **Abstract:** Retrieval-augmented language models (RALMs) represent a substantial
268
- advancement in the capabilities of large language models, notably in reducing
269
- factual hallucination by leveraging external knowledge sources. However, the
270
- reliability of the retrieved information is not always guaranteed. The
271
- retrieval of irrelevant data can lead to misguided responses, and potentially
272
- causing the model to overlook its inherent knowledge, even when it possesses
273
- adequate information to address the query. Moreover, standard RALMs often
274
- struggle to assess whether they possess adequate knowledge, both intrinsic and
275
- retrieved, to provide an accurate answer. In situations where knowledge is
276
- lacking, these systems should ideally respond with "unknown" when the answer is
277
- unattainable. In response to these challenges, we introduces Chain-of-Noting
278
- (CoN), a novel approach aimed at improving the robustness of RALMs in facing
279
- noisy, irrelevant documents and in handling unknown scenarios. The core idea of
280
- CoN is to generate sequential reading notes for retrieved documents, enabling a
281
- thorough evaluation of their relevance to the given question and integrating
282
- this information to formulate the final answer. We employed ChatGPT to create
283
- training data for CoN, which was subsequently trained on an LLaMa-2 7B model.
284
- Our experiments across four open-domain QA benchmarks show that RALMs equipped
285
- with CoN significantly outperform standard RALMs. Notably, CoN achieves an
286
- average improvement of +7.9 in EM score given entirely noisy retrieved
287
- documents and +10.5 in rejection rates for real-time questions that fall
288
- outside the pre-training knowledge scope.
289
-
290
- ## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
291
-
292
- - **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.
293
- - **arXiv id:** [2310.11511v1](http://arxiv.org/abs/2310.11511v1) **Published Date:** 2023-10-17
294
- - **LangChain:**
295
-
296
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
297
- - **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
298
-
299
- **Abstract:** Despite their remarkable capabilities, large language models (LLMs) often
300
- produce responses containing factual inaccuracies due to their sole reliance on
301
- the parametric knowledge they encapsulate. Retrieval-Augmented Generation
302
- (RAG), an ad hoc approach that augments LMs with retrieval of relevant
303
- knowledge, decreases such issues. However, indiscriminately retrieving and
304
- incorporating a fixed number of retrieved passages, regardless of whether
305
- retrieval is necessary, or passages are relevant, diminishes LM versatility or
306
- can lead to unhelpful response generation. We introduce a new framework called
307
- Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's
308
- quality and factuality through retrieval and self-reflection. Our framework
309
- trains a single arbitrary LM that adaptively retrieves passages on-demand, and
310
- generates and reflects on retrieved passages and its own generations using
311
- special tokens, called reflection tokens. Generating reflection tokens makes
312
- the LM controllable during the inference phase, enabling it to tailor its
313
- behavior to diverse task requirements. Experiments show that Self-RAG (7B and
314
- 13B parameters) significantly outperforms state-of-the-art LLMs and
315
- retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG
316
- outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA,
317
- reasoning and fact verification tasks, and it shows significant gains in
318
- improving factuality and citation accuracy for long-form generations relative
319
- to these models.
320
-
321
- ## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
322
-
323
- - **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
324
- - **arXiv id:** [2310.06117v2](http://arxiv.org/abs/2310.06117v2) **Published Date:** 2023-10-09
325
- - **LangChain:**
326
-
327
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
328
- - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
329
- - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
330
-
331
- **Abstract:** We present Step-Back Prompting, a simple prompting technique that enables
332
- LLMs to do abstractions to derive high-level concepts and first principles from
333
- instances containing specific details. Using the concepts and principles to
334
- guide reasoning, LLMs significantly improve their abilities in following a
335
- correct reasoning path towards the solution. We conduct experiments of
336
- Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe
337
- substantial performance gains on various challenging reasoning-intensive tasks
338
- including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back
339
- Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7%
340
- and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.
341
-
342
- ## Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation
343
-
344
- - **Authors:** Xuefei Ning, Zinan Lin, Zixuan Zhou, et al.
345
- - **arXiv id:** [2307.15337v3](http://arxiv.org/abs/2307.15337v3) **Published Date:** 2023-07-28
346
- - **LangChain:**
347
-
348
- - **Template:** [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
349
-
350
- **Abstract:** This work aims at decreasing the end-to-end generation latency of large
351
- language models (LLMs). One of the major causes of the high generation latency
352
- is the sequential decoding approach adopted by almost all state-of-the-art
353
- LLMs. In this work, motivated by the thinking and writing process of humans, we
354
- propose Skeleton-of-Thought (SoT), which first guides LLMs to generate the
355
- skeleton of the answer, and then conducts parallel API calls or batched
356
- decoding to complete the contents of each skeleton point in parallel. Not only
357
- does SoT provide considerable speed-ups across 12 LLMs, but it can also
358
- potentially improve the answer quality on several question categories. SoT is
359
- an initial attempt at data-centric optimization for inference efficiency, and
360
- showcases the potential of eliciting high-quality answers by explicitly
361
- planning the answer structure in language.
362
-
363
- ## Llama 2: Open Foundation and Fine-Tuned Chat Models
-
- - **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.
- - **arXiv id:** [2307.09288v2](http://arxiv.org/abs/2307.09288v2) **Published Date:** 2023-07-18
- - **LangChain:**
-
- - **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
-
- **Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.
-
- ## Lost in the Middle: How Language Models Use Long Contexts
-
- - **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt, et al.
- - **arXiv id:** [2307.03172v3](http://arxiv.org/abs/2307.03172v3) **Published Date:** 2023-07-06
- - **LangChain:**
-
- - **Documentation:** [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
-
- **Abstract:** While recent language models have the ability to take long contexts as input, relatively little is known about how well they use longer context. We analyze the performance of language models on two tasks that require identifying relevant information in their input contexts: multi-document question answering and key-value retrieval. We find that performance can degrade significantly when changing the position of relevant information, indicating that current language models do not robustly make use of information in long input contexts. In particular, we observe that performance is often highest when relevant information occurs at the beginning or end of the input context, and significantly degrades when models must access relevant information in the middle of long contexts, even for explicitly long-context models. Our analysis provides a better understanding of how language models use their input context and provides new evaluation protocols for future long-context language models.
-
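The linked how-to guide counters exactly this failure mode with a document transformer that moves the strongest matches to the edges of the prompt. A small usage sketch, assuming the documents arrive sorted by relevance (best first) as a retriever would return them:

```python
from langchain_community.document_transformers import LongContextReorder
from langchain_core.documents import Document

docs = [Document(page_content=f"passage {i}") for i in range(10)]

# Reorder so the strongest matches sit at the beginning and end of the
# context, leaving the weakest ones in the middle where they matter least.
reordered = LongContextReorder().transform_documents(docs)
```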
- ## Query Rewriting for Retrieval-Augmented Large Language Models
-
- - **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
- - **arXiv id:** [2305.14283v3](http://arxiv.org/abs/2305.14283v3) **Published Date:** 2023-05-23
- - **LangChain:**
-
- - **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
- - **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
-
- **Abstract:** Large Language Models (LLMs) serve as powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read, which replaces the usual retrieve-then-read pipeline for retrieval-augmented LLMs by focusing on rewriting the query itself. Unlike prior studies that adapt either the retriever or the reader, our approach adapts the search query, for there is inevitably a gap between the input text and the knowledge needed for retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline: a small language model is adopted as a trainable rewriter to cater to the black-box LLM reader, and it is trained on the feedback of the LLM reader via reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. Experimental results show consistent performance improvements, indicating that the framework is effective and scalable and offers a new approach to retrieval-augmented LLMs.
-
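The frozen-pipeline variant is a three-step chain. A minimal sketch, assuming generic `complete(prompt) -> str` and `search(query) -> str` callables (both hypothetical stand-ins):

```python
def rewrite_retrieve_read(question: str, complete, search) -> str:
    # Rewrite: turn the user input into a search-engine-friendly query.
    query = complete(
        "Write a web search query that would find the information needed "
        f"to answer:\n{question}\nQuery:"
    )
    # Retrieve: fetch context with the rewritten query.
    context = search(query)
    # Read: the black-box reader answers from the retrieved context.
    return complete(f"Context:\n{context}\n\nAnswer the question: {question}")
```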
- ## Large Language Model Guided Tree-of-Thought
-
- - **Authors:** Jieyi Long
- - **arXiv id:** [2305.08291v1](http://arxiv.org/abs/2305.08291v1) **Published Date:** 2023-05-15
- - **LangChain:**
-
- - **API Reference:** [langchain_experimental.tot](https://python.langchain.com/api_reference/experimental/tot.html)
- - **Cookbook:** [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
-
- **Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind's approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. Our implementation of the ToT-based Sudoku solver is available on [GitHub](https://github.com/jieyilong/tree-of-thought-puzzle-solver).
-
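The interplay of the modules described above boils down to a depth-first search with backtracking. A compact sketch, with `propose` (the prompter agent) and `check` (the checker module) as assumed LLM-backed callables:

```python
def tot_solve(state, propose, check, depth=0, max_depth=4):
    # `propose(state)` returns candidate next thoughts; `check(state)`
    # returns "solved", "viable", or "dead". Both are hypothetical
    # stand-ins for LLM calls, not the langchain_experimental.tot API.
    verdict = check(state)
    if verdict == "solved":
        return state
    if verdict == "dead" or depth == max_depth:
        return None                      # prune this branch and backtrack
    for candidate in propose(state):
        found = tot_solve(candidate, propose, check, depth + 1, max_depth)
        if found is not None:
            return found
    return None                          # all children failed; backtrack
```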
- ## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
-
- - **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
- - **arXiv id:** [2305.04091v3](http://arxiv.org/abs/2305.04091v3) **Published Date:** 2023-05-06
- - **LangChain:**
-
- - **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
-
- **Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with "Let's think step by step" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. The code can be found at https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.
-
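Mechanically, PS prompting is just a replacement for the "Let's think step by step" trigger. A sketch with PS-style wording paraphrased (not quoted verbatim) from the paper, and an assumed `complete` helper:

```python
PS_TRIGGER = (
    "Let's first understand the problem and devise a plan to solve it. "
    "Then, let's carry out the plan and solve the problem step by step."
)

def plan_and_solve(problem: str, complete) -> str:
    # Zero-shot: no hand-crafted demonstrations, only the PS trigger.
    return complete(f"Q: {problem}\nA: {PS_TRIGGER}")
```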
- ## Zero-Shot Listwise Document Reranking with a Large Language Model
-
- - **Authors:** Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al.
- - **arXiv id:** [2305.02156v1](http://arxiv.org/abs/2305.02156v1) **Published Date:** 2023-05-03
- - **LangChain:**
-
- - **Documentation:** [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression)
- - **API Reference:** [langchain...LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#)
-
- **Abstract:** Supervised ranking methods based on bi-encoder or cross-encoder architectures have shown success in multi-stage text ranking tasks, but they require large amounts of relevance judgments as training data. In this work, we propose Listwise Reranker with a Large Language Model (LRL), which achieves strong reranking effectiveness without using any task-specific training data. Different from the existing pointwise ranking methods, where documents are scored independently and ranked according to the scores, LRL directly generates a reordered list of document identifiers given the candidate documents. Experiments on three TREC web search datasets demonstrate that LRL not only outperforms zero-shot pointwise methods when reranking first-stage retrieval results, but can also act as a final-stage reranker to improve the top-ranked results of a pointwise method for improved efficiency. Additionally, we apply our approach to subsets of MIRACL, a recent multilingual retrieval dataset, with results showing its potential to generalize across different languages.
-
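The linked `LLMListwiseRerank` compressor packages this listwise idea in LangChain. A usage sketch; the documents and model name are placeholders, and the class expects a chat model capable of structured output:

```python
from langchain.retrievers.document_compressors import LLMListwiseRerank
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI

docs = [Document(page_content=t) for t in (
    "Late interaction defers token-level matching to query time.",
    "The weather in Paris is mild in spring.",
    "ColBERT compares query and document token embeddings pairwise.",
)]

# Listwise reranking: the model sees all candidates at once and emits a
# reordered list, instead of scoring each document independently.
reranker = LLMListwiseRerank.from_llm(
    llm=ChatOpenAI(model="gpt-4o-mini"), top_n=2
)
top = reranker.compress_documents(docs, query="what is late interaction?")
```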
- ## Visual Instruction Tuning
-
- - **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
- - **arXiv id:** [2304.08485v2](http://arxiv.org/abs/2304.08485v2) **Published Date:** 2023-04-17
- - **LangChain:**
-
- - **Cookbook:** [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
-
- **Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding. Our early experiments show that LLaVA demonstrates impressive multimodal chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields an 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.
-
- ## Generative Agents: Interactive Simulacra of Human Behavior
-
- - **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
- - **arXiv id:** [2304.03442v2](http://arxiv.org/abs/2304.03442v2) **Published Date:** 2023-04-07
- - **LangChain:**
-
- - **Cookbook:** [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
-
- **Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent's experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty-five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine's Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.
-
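The retrieval half of the architecture ranks stored memories by recency, importance, and relevance. A schematic scoring function; the field names, the hourly decay factor, the 1-10 importance scale, and the equal weighting are illustrative stand-ins rather than the paper's tuned values:

```python
def memory_score(memory: dict, query_emb, now: float, sim) -> float:
    # Combine the three retrieval signals named in the abstract.
    hours_old = (now - memory["created_at"]) / 3600.0
    recency = 0.99 ** hours_old                # exponential decay per hour
    importance = memory["importance"] / 10.0   # LLM-rated salience, 1..10
    relevance = sim(memory["embedding"], query_emb)
    return recency + importance + relevance
```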
- ## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
-
- - **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.
- - **arXiv id:** [2303.17760v2](http://arxiv.org/abs/2303.17760v2) **Published Date:** 2023-03-31
- - **LangChain:**
-
- - **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
-
- **Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their "cognitive" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: https://github.com/camel-ai/camel.
-
- ## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
-
- - **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- - **arXiv id:** [2303.17580v4](http://arxiv.org/abs/2303.17580v4) **Published Date:** 2023-03-30
- - **LangChain:**
-
- - **API Reference:** [langchain_experimental.autonomous_agents](https://python.langchain.com/api_reference/experimental/autonomous_agents.html)
- - **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
-
- **Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.
-
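The four stages named in the abstract (plan, select, execute, summarize) fit in a short controller loop. Every callable and prompt below is an assumed stand-in, not the HuggingGPT codebase or the langchain_experimental API:

```python
def hugginggpt(request: str, complete, model_zoo: dict, run_model) -> str:
    # 1) Task planning: the LLM decomposes the request into subtasks.
    plan = complete(f"Decompose into a numbered list of AI subtasks: {request}")
    descriptions = "\n".join(f"{n}: {d}" for n, d in model_zoo.items())
    results = []
    for task in filter(None, map(str.strip, plan.splitlines())):
        # 2) Model selection: pick an expert by its function description.
        model = complete(f"Task: {task}\nModels:\n{descriptions}\nBest model name:")
        # 3) Task execution: run the selected model on the subtask.
        results.append(run_model(model.strip(), task))
    # 4) Response generation: summarize execution results for the user.
    return complete(f"Request: {request}\nResults: {results}\nFinal answer:")
```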
- ## A Watermark for Large Language Models
-
- - **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- - **arXiv id:** [2301.10226v4](http://arxiv.org/abs/2301.10226v4) **Published Date:** 2023-01-24
- - **LangChain:**
-
- - **API Reference:** [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
- **Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of "green" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.
-
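A toy version of the soft watermark described above: the previous token seeds a PRNG that partitions the vocabulary, and the "green" half gets a small logit boost before sampling. The `gamma` and `delta` defaults are illustrative, not the paper's recommended settings:

```python
import hashlib
import random

def green_set(prev_token_id: int, vocab_size: int, gamma: float = 0.5) -> set:
    # Seed a PRNG with the previous token and mark a fraction `gamma`
    # of the vocabulary as green for this generation step.
    seed = int(hashlib.sha256(str(prev_token_id).encode()).hexdigest(), 16)
    rng = random.Random(seed)
    ids = list(range(vocab_size))
    rng.shuffle(ids)
    return set(ids[: int(gamma * vocab_size)])

def watermark_logits(logits: list, prev_token_id: int, delta: float = 2.0):
    # Softly promote green tokens by adding `delta` to their logits;
    # a detector later recomputes the green sets and counts hits.
    greens = green_set(prev_token_id, len(logits))
    return [x + delta if i in greens else x for i, x in enumerate(logits)]
```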
- ## Precise Zero-Shot Dense Retrieval without Relevance Labels
-
- - **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- - **arXiv id:** [2212.10496v1](http://arxiv.org/abs/2212.10496v1) **Published Date:** 2022-12-20
- - **LangChain:**
-
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
- - **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
- - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
- - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
-
- **Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings (HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder (e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step grounds the generated document to the actual corpus, with the encoder's dense bottleneck filtering out the incorrect details. Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers across various tasks (e.g. web search, QA, fact verification) and languages (e.g. sw, ko, ja).
-
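LangChain packages this as the linked `HypotheticalDocumentEmbedder`. A usage sketch; the model choice and the built-in prompt key are assumptions made to keep the example concrete:

```python
from langchain.chains import HypotheticalDocumentEmbedder
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# HyDE: embed a generated hypothetical answer instead of the raw query;
# real documents near that fake document are what the retriever returns.
hyde = HypotheticalDocumentEmbedder.from_llm(
    llm=ChatOpenAI(model="gpt-4o-mini"),
    base_embeddings=OpenAIEmbeddings(),
    prompt_key="web_search",  # one of the chain's built-in prompt templates
)
vector = hyde.embed_query("What are the health benefits of green tea?")
```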
- ## Constitutional AI: Harmlessness from AI Feedback
-
- - **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al.
- - **arXiv id:** [2212.08073v1](http://arxiv.org/abs/2212.08073v1) **Published Date:** 2022-12-15
- - **LangChain:**
-
- - **Documentation:** [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
-
- **Abstract:** As AI systems become more capable, we would like to enlist their help to supervise other AIs. We experiment with methods for training a harmless AI assistant through self-improvement, without any human labels identifying harmful outputs. The only human oversight is provided through a list of rules or principles, and so we refer to the method as 'Constitutional AI'. The process involves both a supervised learning and a reinforcement learning phase. In the supervised phase we sample from an initial model, then generate self-critiques and revisions, and then finetune the original model on revised responses. In the RL phase, we sample from the finetuned model, use a model to evaluate which of the two samples is better, and then train a preference model from this dataset of AI preferences. We then train with RL using the preference model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a result we are able to train a harmless but non-evasive AI assistant that engages with harmful queries by explaining its objections to them. Both the SL and RL methods can leverage chain-of-thought style reasoning to improve the human-judged performance and transparency of AI decision making. These methods make it possible to control AI behavior more precisely and with far fewer human labels.
-
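The supervised phase can be sketched as a critique-and-revise loop over a list of written principles. `complete` is an assumed LLM call and the prompt wording is illustrative, not the paper's actual constitution:

```python
def critique_and_revise(prompt: str, principles: list, complete) -> str:
    # Sample a response, then repeatedly self-critique it against each
    # principle and revise; the final revision becomes the fine-tuning
    # target for the supervised phase.
    response = complete(prompt)
    for principle in principles:
        critique = complete(
            f"Response: {response}\nCritique this response against the "
            f"principle: {principle}"
        )
        response = complete(
            f"Response: {response}\nCritique: {critique}\n"
            "Rewrite the response to address the critique:"
        )
    return response
```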
- ## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
-
- - **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- - **arXiv id:** [2212.07425v3](http://arxiv.org/abs/2212.07425v3) **Published Date:** 2022-12-12
- - **LangChain:**
-
- - **API Reference:** [langchain_experimental.fallacy_removal](https://python.langchain.com/api_reference/experimental/fallacy_removal.html)
-
- **Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. We share our open-source code and data on GitHub to support further work on logical fallacy identification.
-
- ## Complementary Explanations for Effective In-Context Learning
-
- - **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- - **arXiv id:** [2211.13892v2](http://arxiv.org/abs/2211.13892v2) **Published Date:** 2022-11-25
- - **LangChain:**
-
- - **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
-
- **Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.
-
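The proposed MMR-based exemplar selection maps directly onto the linked LangChain class. A sketch with toy examples; it assumes the `faiss` package is installed for the vector store:

```python
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

examples = [
    {"input": "2+2", "output": "4"},
    {"input": "2+3", "output": "5"},
    {"input": "horse -> mammal", "output": "taxonomy"},
]

# MMR trades off relevance to the query against diversity among the
# chosen exemplars, matching the paper's complementarity finding.
selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=2
)
chosen = selector.select_examples({"input": "5+7"})
```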
- ## PAL: Program-aided Language Models
-
- - **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- - **arXiv id:** [2211.10435v2](http://arxiv.org/abs/2211.10435v2) **Published Date:** 2022-11-18
- - **LangChain:**
-
- - **API Reference:** [langchain_experimental.pal_chain](https://python.langchain.com/api_reference/experimental/pal_chain.html), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
- - **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
-
- **Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time ("few-shot prompting"). Much of this success can be attributed to prompting methods such as "chain-of-thought", which employ LLMs both for understanding the problem description by decomposing it into steps and for solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. Our code and data are publicly available at http://reasonwithpal.com/ .
-
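The division of labor is simple to sketch: the model writes a program, the interpreter runs it. `complete` is an assumed LLM helper, and any real deployment should sandbox the `exec` call:

```python
def pal_answer(problem: str, complete):
    # The LLM writes a Python program whose variable `answer` holds the
    # result; the interpreter, not the model, does the actual computation.
    program = complete(
        "Write Python code that solves the problem and stores the result "
        f"in a variable named answer.\nProblem: {problem}\nCode:"
    )
    namespace = {}
    exec(program, namespace)  # runs generated code; sandbox in practice
    return namespace["answer"]
```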
- ## An Analysis of Fusion Functions for Hybrid Retrieval
-
- - **Authors:** Sebastian Bruch, Siyu Gai, Amir Ingber
- - **arXiv id:** [2210.11934v2](http://arxiv.org/abs/2210.11934v2) **Published Date:** 2022-10-21
- - **LangChain:**
-
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
- **Abstract:** We study hybrid search in text retrieval where lexical and semantic search are fused together with the intuition that the two are complementary in how they model relevance. In particular, we examine fusion by a convex combination (CC) of lexical and semantic scores, as well as the Reciprocal Rank Fusion (RRF) method, and identify their advantages and potential pitfalls. Contrary to existing studies, we find RRF to be sensitive to its parameters; that the learning of a CC fusion is generally agnostic to the choice of score normalization; that CC outperforms RRF in in-domain and out-of-domain settings; and finally, that CC is sample efficient, requiring only a small set of training examples to tune its only parameter to a target domain.
-
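The two fusion functions compared in the paper, written out for reference; `lam` plays the single tunable CC parameter, and `k=60` is the conventional RRF constant (both values here are illustrative defaults):

```python
def convex_combination(lex: float, sem: float, lam: float = 0.8) -> float:
    # CC fusion: a weighted sum of (normalized) lexical and semantic
    # scores for one document.
    return lam * sem + (1 - lam) * lex

def rrf(ranks: list, k: int = 60) -> float:
    # Reciprocal Rank Fusion: sum reciprocal ranks of the document
    # across the systems being fused.
    return sum(1.0 / (k + r) for r in ranks)
```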
- ## ReAct: Synergizing Reasoning and Acting in Language Models
-
- - **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
- - **arXiv id:** [2210.03629v3](http://arxiv.org/abs/2210.03629v3) **Published Date:** 2022-10-06
- - **LangChain:**
-
- - **Documentation:** [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts)
- - **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
-
- **Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples.
- Project site with code: https://react-lm.github.io
-
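The linked `create_react_agent` builds this thought/action/observation loop in LangChain. A runnable sketch with a deliberately trivial tool; the hub prompt name and model choice are common defaults used here as assumptions:

```python
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)

prompt = hub.pull("hwchase17/react")  # a widely used ReAct prompt
agent = create_react_agent(ChatOpenAI(model="gpt-4o-mini"), [word_length], prompt)
executor = AgentExecutor(agent=agent, tools=[word_length], verbose=True)
executor.invoke({"input": "How many letters are in 'interleaved'?"})
```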
- ## Deep Lake: a Lakehouse for Deep Learning
-
- - **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- - **arXiv id:** [2209.10785v2](http://arxiv.org/abs/2209.10785v2) **Published Date:** 2022-09-22
- - **LangChain:**
-
- - **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
-
- **Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.
-
- ## Matryoshka Representation Learning
-
- - **Authors:** Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al.
- - **arXiv id:** [2205.13147v4](http://arxiv.org/abs/2205.13147v4) **Published Date:** 2022-05-26
- - **LangChain:**
-
- - **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
-
- **Abstract:** Learned representations are a central component in modern ML systems, serving a multitude of downstream tasks. When training such representations, it is often the case that computational and statistical constraints for each downstream task are unknown. In this context rigid, fixed capacity representations can be either over- or under-accommodating to the task at hand. This leads us to ask: can we design a flexible representation that can adapt to multiple downstream tasks with varying computational resources? Our main contribution is Matryoshka Representation Learning (MRL) which encodes information at different granularities and allows a single embedding to adapt to the computational constraints of downstream tasks. MRL minimally modifies existing representation learning pipelines and imposes no additional cost during inference and deployment. MRL learns coarse-to-fine representations that are at least as accurate and rich as independently trained low-dimensional representations. The flexibility within the learned Matryoshka Representations offers: (a) up to 14x smaller embedding size for ImageNet-1K classification at the same level of accuracy; (b) up to 14x real-world speed-ups for large-scale retrieval on ImageNet-1K and 4K; and (c) up to 2% accuracy improvements for long-tail few-shot classification, all while being as robust as the original representations. Finally, we show that MRL extends seamlessly to web-scale datasets (ImageNet, JFT) across various modalities -- vision (ViT, ResNet), vision + language (ALIGN) and language (BERT). MRL code and pretrained models are open-sourced at https://github.com/RAIVNLab/MRL.
-
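At inference time, using an MRL embedding at lower capacity is just truncation plus renormalization, as sketched below with a random stand-in for an MRL-trained vector:

```python
import numpy as np

def matryoshka_truncate(embedding: np.ndarray, dim: int) -> np.ndarray:
    # Keep only the first `dim` coordinates of a nested embedding and
    # re-normalize, trading a little accuracy for a much smaller vector.
    small = embedding[:dim]
    return small / np.linalg.norm(small)

full = np.random.randn(768)           # stand-in for an MRL-trained embedding
compact = matryoshka_truncate(full, 64)
```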
- ## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
-
- - **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- - **arXiv id:** [2205.12654v1](http://arxiv.org/abs/2205.12654v1) **Published Date:** 2022-05-25
- - **LangChain:**
-
- - **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
-
- **Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting.
- Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems.
-
- ## Evaluating the Text-to-SQL Capabilities of Large Language Models
-
- - **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- - **arXiv id:** [2204.00498v1](http://arxiv.org/abs/2204.00498v1) **Published Date:** 2022-03-15
- - **LangChain:**
-
- - **Documentation:** [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa)
- - **API Reference:** [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
-
- **Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples.
-
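The recipe the paper evaluates (schema in the prompt, optionally a few in-domain examples, then execute the generated SQL) maps onto LangChain's `SQLDatabase` utility. A sketch with a hypothetical SQLite file and an assumed `complete` LLM helper:

```python
from langchain_community.utilities import SQLDatabase

def text_to_sql(question: str, complete) -> str:
    # Put the schema in the prompt, let the model write SQL, execute it.
    db = SQLDatabase.from_uri("sqlite:///example.db")  # hypothetical DB
    sql = complete(
        f"Schema:\n{db.get_table_info()}\n\n"
        f"Write a single SQL query answering: {question}\nSQL:"
    )
    return db.run(sql)
```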
- ## Locally Typical Sampling
-
- - **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- - **arXiv id:** [2202.00666v5](http://arxiv.org/abs/2202.00666v5) **Published Date:** 2022-02-01
- - **LangChain:**
-
- - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
- **Abstract:** Today's probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions.
-
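A direct, minimal implementation of the selection rule in the abstract: keep the tokens whose information content is closest to the conditional entropy, up to a target probability mass, then renormalize before sampling. The `mass` default is an illustrative choice:

```python
import numpy as np

def locally_typical_filter(probs: np.ndarray, mass: float = 0.9) -> np.ndarray:
    # Conditional entropy = expected information content of this step.
    entropy = -np.sum(probs * np.log(probs + 1e-12))
    surprise = -np.log(probs + 1e-12)
    # Sort tokens by how close their surprise is to the entropy.
    order = np.argsort(np.abs(surprise - entropy))
    keep = order[np.cumsum(probs[order]) <= mass]
    keep = order[: max(len(keep), 1)]      # always keep at least one token
    filtered = np.zeros_like(probs)
    filtered[keep] = probs[keep]
    return filtered / filtered.sum()
```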
- ## ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction
-
- - **Authors:** Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al.
- - **arXiv id:** [2112.01488v3](http://arxiv.org/abs/2112.01488v3) **Published Date:** 2021-12-02
- - **LangChain:**
-
- - **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
-
- **Abstract:** Neural information retrieval (IR) has greatly advanced search and other knowledge-intensive language tasks. While many neural IR methods encode queries and documents into single-vector representations, late interaction models produce multi-vector representations at the granularity of each token and decompose relevance modeling into scalable token-level computations. This decomposition has been shown to make late interaction more effective, but it inflates the space footprint of these models by an order of magnitude. In this work, we introduce ColBERTv2, a retriever that couples an aggressive residual compression mechanism with a denoised supervision strategy to simultaneously improve the quality and space footprint of late interaction. We evaluate ColBERTv2 across a wide range of benchmarks, establishing state-of-the-art quality within and outside the training domain while reducing the space footprint of late interaction models by 6-10x.
-
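Late interaction itself reduces to the MaxSim operator: each query token keeps its best-matching document token, and those scores are summed. A sketch with token-embedding matrices assumed to be L2-normalized:

```python
import numpy as np

def late_interaction_score(query_vecs: np.ndarray, doc_vecs: np.ndarray) -> float:
    # (n_q, d) x (d, n_d) -> (n_q, n_d) token-level cosine similarities.
    sims = query_vecs @ doc_vecs.T
    # MaxSim: best document token per query token, summed over the query.
    return float(sims.max(axis=1).sum())
```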
- ## Learning Transferable Visual Models From Natural Language Supervision
-
- - **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- - **arXiv id:** [2103.00020v1](http://arxiv.org/abs/2103.00020v1) **Published Date:** 2021-02-26
- - **LangChain:**
-
- - **API Reference:** [langchain_experimental.open_clip](https://python.langchain.com/api_reference/experimental/open_clip.html)
-
- **Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.
-
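Zero-shot transfer as described above amounts to scoring an image embedding against caption embeddings for the candidate classes. In this sketch, `image_emb` and `embed_text` are assumed, normalized CLIP-style encoder outputs:

```python
import numpy as np

def zero_shot_classify(image_emb: np.ndarray, class_names: list, embed_text) -> str:
    # Build a natural-language caption per class and pick the best match.
    captions = [f"a photo of a {c}" for c in class_names]
    text_embs = np.stack([embed_text(t) for t in captions])
    sims = text_embs @ image_emb          # cosine, given normalized vectors
    return class_names[int(np.argmax(sims))]
```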
- ## Language Models are Few-Shot Learners
-
- - **Authors:** Tom B. Brown, Benjamin Mann, Nick Ryder, et al.
- - **arXiv id:** [2005.14165v4](http://arxiv.org/abs/2005.14165v4) **Published Date:** 2020-05-28
- - **LangChain:**
-
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
- **Abstract:** Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.
-
- ## Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks
-
- - **Authors:** Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al.
- - **arXiv id:** [2005.11401v4](http://arxiv.org/abs/2005.11401v4) **Published Date:** 2020-05-22
- - **LangChain:**
-
- - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
-
- **Abstract:** Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit non-parametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) -- models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations: one conditions on the same retrieved passages across the whole generated sequence, while the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.
-
- ## CTRL: A Conditional Transformer Language Model for Controllable Generation
-
- - **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- - **arXiv id:** [1909.05858v2](http://arxiv.org/abs/1909.05858v2) **Published Date:** 2019-09-11
- - **LangChain:**
-
- - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
-
- **Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
-
 
langchain_md_files/additional_resources/dependents.mdx DELETED
@@ -1,554 +0,0 @@
1
- # Dependents
2
-
3
- Dependents stats for `langchain-ai/langchain`
4
-
5
- [![](https://img.shields.io/static/v1?label=Used%20by&message=41717&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
6
- [![](https://img.shields.io/static/v1?label=Used%20by%20(public)&message=538&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
7
- [![](https://img.shields.io/static/v1?label=Used%20by%20(private)&message=41179&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)
8
-
9
-
10
- [update: `2023-12-08`; only dependent repositories with Stars > 100]
11
-
12
-
13
- | Repository | Stars |
14
- | :-------- | -----: |
15
- |[AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 46514 |
16
- |[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 44439 |
17
- |[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 35906 |
18
- |[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 35528 |
19
- |[moymix/TaskMatrix](https://github.com/moymix/TaskMatrix) | 34342 |
20
- |[geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 31126 |
21
- |[streamlit/streamlit](https://github.com/streamlit/streamlit) | 28911 |
22
- |[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 27833 |
23
- |[StanGirard/quivr](https://github.com/StanGirard/quivr) | 26032 |
24
- |[OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 24946 |
25
- |[run-llama/llama_index](https://github.com/run-llama/llama_index) | 24859 |
26
- |[jmorganca/ollama](https://github.com/jmorganca/ollama) | 20849 |
27
- |[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 20249 |
28
- |[chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 19305 |
29
- |[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 19172 |
30
- |[PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 17528 |
31
- |[cube-js/cube](https://github.com/cube-js/cube) | 16575 |
32
- |[mlflow/mlflow](https://github.com/mlflow/mlflow) | 16000 |
33
- |[mudler/LocalAI](https://github.com/mudler/LocalAI) | 14067 |
34
- |[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 13679 |
35
- |[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 13648 |
36
- |[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 13423 |
37
- |[openai/evals](https://github.com/openai/evals) | 12649 |
38
- |[airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 12460 |
39
- |[langgenius/dify](https://github.com/langgenius/dify) | 11859 |
40
- |[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10672 |
41
- |[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9437 |
42
- |[langchain-ai/langchainjs](https://github.com/langchain-ai/langchainjs) | 9227 |
43
- |[gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 9203 |
44
- |[aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples) | 9079 |
45
- |[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 8945 |
46
- |[PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 7550 |
47
- |[bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 6957 |
48
- |[THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6801 |
49
- |[microsoft/promptflow](https://github.com/microsoft/promptflow) | 6776 |
50
- |[cpacker/MemGPT](https://github.com/cpacker/MemGPT) | 6642 |
51
- |[joshpxyne/gpt-migrate](https://github.com/joshpxyne/gpt-migrate) | 6482 |
52
- |[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 6037 |
53
- |[embedchain/embedchain](https://github.com/embedchain/embedchain) | 6023 |
54
- |[mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 6019 |
55
- |[assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 5936 |
56
- |[sweepai/sweep](https://github.com/sweepai/sweep) | 5855 |
57
- |[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5766 |
58
- |[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 5710 |
59
- |[pdm-project/pdm](https://github.com/pdm-project/pdm) | 5665 |
60
- |[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 5568 |
61
- |[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 5507 |
62
- |[Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 5501 |
63
- |[facebookresearch/llama-recipes](https://github.com/facebookresearch/llama-recipes) | 5477 |
64
- |[serge-chat/serge](https://github.com/serge-chat/serge) | 5221 |
65
- |[run-llama/rags](https://github.com/run-llama/rags) | 4916 |
66
- |[openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4870 |
67
- |[danswer-ai/danswer](https://github.com/danswer-ai/danswer) | 4774 |
68
- |[langchain-ai/opengpts](https://github.com/langchain-ai/opengpts) | 4709 |
69
- |[postgresml/postgresml](https://github.com/postgresml/postgresml) | 4639 |
70
- |[MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 4582 |
71
- |[intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4581 |
72
- |[yihong0618/xiaogpt](https://github.com/yihong0618/xiaogpt) | 4359 |
73
- |[RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 4357 |
74
- |[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 4317 |
75
- |[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4289 |
76
- |[apache/nifi](https://github.com/apache/nifi) | 4098 |
77
- |[langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 4091 |
78
- |[aiwaves-cn/agents](https://github.com/aiwaves-cn/agents) | 4073 |
79
- |[krishnaik06/The-Grand-Complete-Data-Science-Materials](https://github.com/krishnaik06/The-Grand-Complete-Data-Science-Materials) | 4065 |
80
- |[khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 4016 |
81
- |[Azure/azure-sdk-for-python](https://github.com/Azure/azure-sdk-for-python) | 3941 |
82
- |[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3915 |
83
- |[OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 3799 |
84
- |[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3771 |
85
- |[kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3688 |
86
- |[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 3543 |
87
- |[llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3515 |
88
- |[shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 3425 |
89
- |[openchatai/OpenCopilot](https://github.com/openchatai/OpenCopilot) | 3418 |
90
- |[josStorer/RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | 3297 |
91
- |[whitead/paper-qa](https://github.com/whitead/paper-qa) | 3280 |
92
- |[homanp/superagent](https://github.com/homanp/superagent) | 3258 |
93
- |[ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 3199 |
94
- |[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 3099 |
95
- |[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 3090 |
96
- |[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2989 |
97
- |[xlang-ai/OpenAgents](https://github.com/xlang-ai/OpenAgents) | 2825 |
98
- |[dataelement/bisheng](https://github.com/dataelement/bisheng) | 2797 |
99
- |[Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2784 |
100
- |[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2734 |
101
- |[run-llama/llama-hub](https://github.com/run-llama/llama-hub) | 2721 |
102
- |[SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2647 |
103
- |[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 2637 |
104
- |[X-D-Lab/LangChain-ChatGLM-Webui](https://github.com/X-D-Lab/LangChain-ChatGLM-Webui) | 2532 |
105
- |[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2517 |
106
- |[keephq/keep](https://github.com/keephq/keep) | 2448 |
107
- |[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2397 |
108
- |[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2324 |
109
- |[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2241 |
110
- |[YiVal/YiVal](https://github.com/YiVal/YiVal) | 2232 |
111
- |[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 2189 |
112
- |[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 2136 |
113
- |[microsoft/TaskWeaver](https://github.com/microsoft/TaskWeaver) | 2126 |
114
- |[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 2083 |
115
- |[FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 2053 |
116
- |[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1999 |
117
- |[hegelai/prompttools](https://github.com/hegelai/prompttools) | 1984 |
118
- |[mckinsey/vizro](https://github.com/mckinsey/vizro) | 1951 |
119
- |[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1868 |
120
- |[dot-agent/openAMS](https://github.com/dot-agent/openAMS) | 1796 |
121
- |[explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 1766 |
122
- |[AI-Citizen/SolidGPT](https://github.com/AI-Citizen/SolidGPT) | 1761 |
123
- |[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1696 |
124
- |[run-llama/sec-insights](https://github.com/run-llama/sec-insights) | 1654 |
125
- |[avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1635 |
126
- |[microsoft/WhatTheHack](https://github.com/microsoft/WhatTheHack) | 1629 |
127
- |[noahshinn/reflexion](https://github.com/noahshinn/reflexion) | 1625 |
128
- |[psychic-api/psychic](https://github.com/psychic-api/psychic) | 1618 |
129
- |[Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1611 |
130
- |[pinterest/querybook](https://github.com/pinterest/querybook) | 1586 |
131
- |[refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1553 |
132
- |[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1537 |
133
- |[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1522 |
134
- |[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1493 |
135
- |[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1484 |
136
- |[greshake/llm-security](https://github.com/greshake/llm-security) | 1483 |
137
- |[promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 1480 |
138
- |[milvus-io/bootcamp](https://github.com/milvus-io/bootcamp) | 1477 |
139
- |[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1475 |
140
- |[melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 1428 |
141
- |[YORG-AI/Open-Assistant](https://github.com/YORG-AI/Open-Assistant) | 1419 |
142
- |[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1416 |
143
- |[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1408 |
144
- |[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 1398 |
145
- |[intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 1387 |
146
- |[Azure/azureml-examples](https://github.com/Azure/azureml-examples) | 1385 |
147
- |[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1367 |
148
- |[eyurtsev/kor](https://github.com/eyurtsev/kor) | 1355 |
149
- |[xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 1325 |
150
- |[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 1323 |
151
- |[SuperDuperDB/superduperdb](https://github.com/SuperDuperDB/superduperdb) | 1290 |
152
- |[cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 1284 |
153
- |[psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 1260 |
154
- |[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 1250 |
155
- |[nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 1237 |
156
- |[pluralsh/plural](https://github.com/pluralsh/plural) | 1234 |
157
- |[cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 1194 |
158
- |[LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 1184 |
159
- |[poe-platform/server-bot-quick-start](https://github.com/poe-platform/server-bot-quick-start) | 1182 |
160
- |[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1180 |
161
- |[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1171 |
162
- |[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1156 |
163
- |[alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 1153 |
164
- |[ThousandBirdsInc/chidori](https://github.com/ThousandBirdsInc/chidori) | 1152 |
165
- |[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1137 |
166
- |[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 1083 |
167
- |[ray-project/llm-applications](https://github.com/ray-project/llm-applications) | 1080 |
168
- |[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 1072 |
169
- |[jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 1041 |
170
- |[MetaGLM/FinGLM](https://github.com/MetaGLM/FinGLM) | 1035 |
171
- |[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 1020 |
172
- |[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 991 |
173
- |[langchain-ai/langserve](https://github.com/langchain-ai/langserve) | 983 |
174
- |[THUDM/AgentTuning](https://github.com/THUDM/AgentTuning) | 976 |
175
- |[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 975 |
176
- |[codeacme17/examor](https://github.com/codeacme17/examor) | 964 |
177
- |[all-in-aigc/gpts-works](https://github.com/all-in-aigc/gpts-works) | 946 |
178
- |[Ikaros-521/AI-Vtuber](https://github.com/Ikaros-521/AI-Vtuber) | 946 |
179
- |[microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 898 |
180
- |[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 895 |
181
- |[ricklamers/shell-ai](https://github.com/ricklamers/shell-ai) | 893 |
182
- |[modelscope/modelscope-agent](https://github.com/modelscope/modelscope-agent) | 893 |
183
- |[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 886 |
184
- |[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 880 |
185
- |[kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 872 |
186
- |[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 846 |
187
- |[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 841 |
188
- |[kreneskyp/ix](https://github.com/kreneskyp/ix) | 821 |
189
- |[Link-AGI/AutoAgents](https://github.com/Link-AGI/AutoAgents) | 820 |
190
- |[truera/trulens](https://github.com/truera/trulens) | 794 |
191
- |[Dataherald/dataherald](https://github.com/Dataherald/dataherald) | 788 |
192
- |[sunlabuiuc/PyHealth](https://github.com/sunlabuiuc/PyHealth) | 783 |
193
- |[jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 783 |
194
- |[pyspark-ai/pyspark-ai](https://github.com/pyspark-ai/pyspark-ai) | 782 |
195
- |[confident-ai/deepeval](https://github.com/confident-ai/deepeval) | 780 |
196
- |[billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 777 |
197
- |[langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 776 |
198
- |[akshata29/entaoai](https://github.com/akshata29/entaoai) | 771 |
199
- |[LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 770 |
200
- |[getmetal/motorhead](https://github.com/getmetal/motorhead) | 768 |
201
- |[Dicklesworthstone/swiss_army_llama](https://github.com/Dicklesworthstone/swiss_army_llama) | 757 |
202
- |[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 757 |
203
- |[msoedov/langcorn](https://github.com/msoedov/langcorn) | 754 |
204
- |[e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 753 |
205
- |[microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 749 |
206
- |[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 731 |
207
- |[MiuLab/Taiwan-LLM](https://github.com/MiuLab/Taiwan-LLM) | 716 |
208
- |[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 702 |
209
- |[Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 692 |
210
- |[iusztinpaul/hands-on-llms](https://github.com/iusztinpaul/hands-on-llms) | 687 |
211
- |[safevideo/autollm](https://github.com/safevideo/autollm) | 682 |
212
- |[OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 669 |
213
- |[NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 663 |
214
- |[AILab-CVC/GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | 662 |
215
- |[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 657 |
216
- |[yvann-ba/Robby-chatbot](https://github.com/yvann-ba/Robby-chatbot) | 639 |
217
- |[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 635 |
218
- |[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 630 |
219
- |[microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 621 |
220
- |[aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 616 |
221
- |[NeumTry/NeumAI](https://github.com/NeumTry/NeumAI) | 605 |
222
- |[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 599 |
223
- |[plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 595 |
224
- |[marimo-team/marimo](https://github.com/marimo-team/marimo) | 591 |
225
- |[yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 586 |
226
- |[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 584 |
227
- |[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 573 |
228
- |[dgarnitz/vectorflow](https://github.com/dgarnitz/vectorflow) | 568 |
229
- |[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 564 |
230
- |[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 563 |
231
- |[traceloop/openllmetry](https://github.com/traceloop/openllmetry) | 559 |
232
- |[Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 546 |
233
- |[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 545 |
234
- |[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 544 |
235
- |[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 533 |
236
- |[marella/chatdocs](https://github.com/marella/chatdocs) | 532 |
237
- |[opentensor/bittensor](https://github.com/opentensor/bittensor) | 532 |
238
- |[DjangoPeng/openai-quickstart](https://github.com/DjangoPeng/openai-quickstart) | 527 |
239
- |[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 517 |
240
- |[sidhq/Multi-GPT](https://github.com/sidhq/Multi-GPT) | 515 |
241
- |[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |
242
- |[sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |
243
- |[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |
244
- |[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |
245
- |[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |
246
- |[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |
247
- |[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 483 |
248
- |[datawhalechina/llm-universe](https://github.com/datawhalechina/llm-universe) | 475 |
249
- |[leondz/garak](https://github.com/leondz/garak) | 464 |
250
- |[RedisVentures/ArXivChatGuru](https://github.com/RedisVentures/ArXivChatGuru) | 461 |
251
- |[Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 455 |
252
- |[Aiyu-awa/luna-ai](https://github.com/Aiyu-awa/luna-ai) | 450 |
253
- |[DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 450 |
254
- |[Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 449 |
255
- |[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 447 |
256
- |[onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 446 |
257
- |[junruxiong/IncarnaMind](https://github.com/junruxiong/IncarnaMind) | 441 |
258
- |[CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 441 |
259
- |[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 437 |
260
- |[showlab/VLog](https://github.com/showlab/VLog) | 436 |
261
- |[wandb/weave](https://github.com/wandb/weave) | 420 |
262
- |[QwenLM/Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) | 419 |
263
- |[huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 416 |
264
- |[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 411 |
265
- |[monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 408 |
266
- |[mallorbc/Finetune_LLMs](https://github.com/mallorbc/Finetune_LLMs) | 406 |
267
- |[JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 405 |
268
- |[rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 401 |
269
- |[langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 398 |
270
- |[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 398 |
271
- |[morpheuslord/GPT_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 391 |
272
- |[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 387 |
273
- |[JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 384 |
274
- |[mrwadams/attackgen](https://github.com/mrwadams/attackgen) | 381 |
275
- |[codefuse-ai/Test-Agent](https://github.com/codefuse-ai/Test-Agent) | 380 |
276
- |[personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 379 |
277
- |[mosaicml/examples](https://github.com/mosaicml/examples) | 378 |
278
- |[steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 370 |
279
- |[FlagAI-Open/Aquila2](https://github.com/FlagAI-Open/Aquila2) | 365 |
280
- |[Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 365 |
281
- |[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 357 |
282
- |[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 354 |
283
- |[lilacai/lilac](https://github.com/lilacai/lilac) | 352 |
284
- |[preset-io/promptimize](https://github.com/preset-io/promptimize) | 351 |
285
- |[yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 347 |
286
- |[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 346 |
287
- |[zhoudaquan/ChatAnything](https://github.com/zhoudaquan/ChatAnything) | 343 |
288
- |[rgomezcasas/dotfiles](https://github.com/rgomezcasas/dotfiles) | 343 |
289
- |[tigerlab-ai/tiger](https://github.com/tigerlab-ai/tiger) | 342 |
290
- |[HumanSignal/label-studio-ml-backend](https://github.com/HumanSignal/label-studio-ml-backend) | 334 |
291
- |[nasa-petal/bidara](https://github.com/nasa-petal/bidara) | 334 |
292
- |[momegas/megabots](https://github.com/momegas/megabots) | 334 |
293
- |[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 330 |
294
- |[CambioML/pykoi](https://github.com/CambioML/pykoi) | 326 |
295
- |[Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 326 |
296
- |[wandb/edu](https://github.com/wandb/edu) | 326 |
297
- |[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 324 |
298
- |[sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 322 |
299
- |[liangwq/Chatglm_lora_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 321 |
300
- |[ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 320 |
301
- |[itamargol/openai](https://github.com/itamargol/openai) | 318 |
302
- |[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 304 |
303
- |[SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 302 |
304
- |[facebookresearch/personal-timeline](https://github.com/facebookresearch/personal-timeline) | 302 |
305
- |[hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 301 |
306
- |[Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 300 |
307
- |[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 300 |
308
- |[GPT-Fathom/GPT-Fathom](https://github.com/GPT-Fathom/GPT-Fathom) | 299 |
309
- |[kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 299 |
310
- |[kyegomez/swarms](https://github.com/kyegomez/swarms) | 296 |
311
- |[LangStream/langstream](https://github.com/LangStream/langstream) | 295 |
312
- |[genia-dev/GeniA](https://github.com/genia-dev/GeniA) | 294 |
313
- |[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 291 |
314
- |[TsinghuaDatabaseGroup/DB-GPT](https://github.com/TsinghuaDatabaseGroup/DB-GPT) | 290 |
315
- |[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 283 |
316
- |[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 283 |
317
- |[AutoPackAI/beebot](https://github.com/AutoPackAI/beebot) | 282 |
318
- |[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 282 |
319
- |[gkamradt/LLMTest_NeedleInAHaystack](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) | 280 |
320
- |[gustavz/DataChad](https://github.com/gustavz/DataChad) | 280 |
321
- |[Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 278 |
322
- |[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 275 |
323
- |[AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 268 |
324
- |[ennucore/clippinator](https://github.com/ennucore/clippinator) | 267 |
325
- |[artitw/text2text](https://github.com/artitw/text2text) | 264 |
326
- |[anarchy-ai/LLM-VM](https://github.com/anarchy-ai/LLM-VM) | 263 |
327
- |[wpydcr/LLM-Kit](https://github.com/wpydcr/LLM-Kit) | 262 |
328
- |[streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 262 |
329
- |[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 262 |
330
- |[yym68686/ChatGPT-Telegram-Bot](https://github.com/yym68686/ChatGPT-Telegram-Bot) | 261 |
331
- |[PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 259 |
332
- |[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 259 |
333
- |[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 259 |
334
- |[ml6team/fondant](https://github.com/ml6team/fondant) | 254 |
335
- |[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 254 |
336
- |[rahulnyk/knowledge_graph](https://github.com/rahulnyk/knowledge_graph) | 253 |
337
- |[recalign/RecAlign](https://github.com/recalign/RecAlign) | 248 |
338
- |[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 248 |
339
- |[fetchai/uAgents](https://github.com/fetchai/uAgents) | 247 |
340
- |[arthur-ai/bench](https://github.com/arthur-ai/bench) | 247 |
341
- |[miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 246 |
342
- |[RoboCoachTechnologies/GPT-Synthesizer](https://github.com/RoboCoachTechnologies/GPT-Synthesizer) | 244 |
343
- |[langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 242 |
344
- |[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 242 |
345
- |[PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 241 |
346
- |[stepanogil/autonomous-hr-chatbot](https://github.com/stepanogil/autonomous-hr-chatbot) | 238 |
347
- |[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 236 |
348
- |[nexus-stc/stc](https://github.com/nexus-stc/stc) | 235 |
349
- |[yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 235 |
350
- |[Gentopia-AI/Gentopia](https://github.com/Gentopia-AI/Gentopia) | 235 |
351
- |[alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 235 |
352
- |[grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 232 |
353
- |[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 232 |
354
- |[darrenburns/elia](https://github.com/darrenburns/elia) | 231 |
355
- |[orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 231 |
356
- |[handrew/browserpilot](https://github.com/handrew/browserpilot) | 226 |
357
- |[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 225 |
358
- |[nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 225 |
359
- |[dbpunk-labs/octogen](https://github.com/dbpunk-labs/octogen) | 224 |
360
- |[langchain-ai/weblangchain](https://github.com/langchain-ai/weblangchain) | 222 |
361
- |[CL-lau/SQL-GPT](https://github.com/CL-lau/SQL-GPT) | 222 |
362
- |[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |
363
- |[showlab/UniVTG](https://github.com/showlab/UniVTG) | 220 |
364
- |[edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 219 |
365
- |[hardbyte/qabot](https://github.com/hardbyte/qabot) | 216 |
366
- |[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 215 |
367
- |[Azure-Samples/chat-with-your-data-solution-accelerator](https://github.com/Azure-Samples/chat-with-your-data-solution-accelerator) | 214 |
368
- |[amadad/agentcy](https://github.com/amadad/agentcy) | 213 |
369
- |[snexus/llm-search](https://github.com/snexus/llm-search) | 212 |
370
- |[afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 206 |
371
- |[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 205 |
372
- |[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 205 |
373
- |[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 205 |
374
- |[voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 204 |
375
- |[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 204 |
376
- |[emarco177/ice_breaker](https://github.com/emarco177/ice_breaker) | 204 |
377
- |[tencentmusic/supersonic](https://github.com/tencentmusic/supersonic) | 202 |
378
- |[Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 202 |
379
- |[blob42/Instrukt](https://github.com/blob42/Instrukt) | 201 |
380
- |[langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk) | 200 |
381
- |[SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 200 |
382
- |[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 198 |
383
- |[KMnO4-zx/huanhuan-chat](https://github.com/KMnO4-zx/huanhuan-chat) | 196 |
384
- |[Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 192 |
385
- |[hongbo-miao/hongbomiao.com](https://github.com/hongbo-miao/hongbomiao.com) | 190 |
386
- |[CakeCrusher/openplugin](https://github.com/CakeCrusher/openplugin) | 190 |
387
- |[PaddlePaddle/ERNIE-Bot-SDK](https://github.com/PaddlePaddle/ERNIE-Bot-SDK) | 189 |
388
- |[retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 189 |
389
- |[AmineDiro/cria](https://github.com/AmineDiro/cria) | 187 |
390
- |[lancedb/vectordb-recipes](https://github.com/lancedb/vectordb-recipes) | 186 |
391
- |[vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 185 |
392
- |[aws-ia/ecs-blueprints](https://github.com/aws-ia/ecs-blueprints) | 184 |
393
- |[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 183 |
394
- |[MuhammadMoinFaisal/LargeLanguageModelsProjects](https://github.com/MuhammadMoinFaisal/LargeLanguageModelsProjects) | 182 |
395
- |[shauryr/S2QA](https://github.com/shauryr/S2QA) | 181 |
396
- |[summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 180 |
397
- |[NomaDamas/RAGchain](https://github.com/NomaDamas/RAGchain) | 179 |
398
- |[pnkvalavala/repochat](https://github.com/pnkvalavala/repochat) | 179 |
399
- |[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 177 |
400
- |[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 177 |
401
- |[langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 175 |
402
- |[iMagist486/ElasticSearch-Langchain-Chatglm2](https://github.com/iMagist486/ElasticSearch-Langchain-Chatglm2) | 175 |
403
- |[limaoyi1/Auto-PPT](https://github.com/limaoyi1/Auto-PPT) | 175 |
404
- |[Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 175 |
405
- |[morpheuslord/HackBot](https://github.com/morpheuslord/HackBot) | 174 |
406
- |[v7labs/benchllm](https://github.com/v7labs/benchllm) | 174 |
407
- |[Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 174 |
408
- |[dongyh20/Octopus](https://github.com/dongyh20/Octopus) | 173 |
409
- |[kimtth/azure-openai-llm-vector-langchain](https://github.com/kimtth/azure-openai-llm-vector-langchain) | 173 |
410
- |[mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 173 |
411
- |[zilliztech/akcio](https://github.com/zilliztech/akcio) | 172 |
412
- |[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 172 |
413
- |[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 172 |
414
- |[joaomdmoura/CrewAI](https://github.com/joaomdmoura/CrewAI) | 170 |
415
- |[katanaml/llm-mistral-invoice-cpu](https://github.com/katanaml/llm-mistral-invoice-cpu) | 170 |
416
- |[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 170 |
417
- |[mudler/LocalAGI](https://github.com/mudler/LocalAGI) | 167 |
418
- |[dssjon/biblos](https://github.com/dssjon/biblos) | 165 |
419
- |[kjappelbaum/gptchem](https://github.com/kjappelbaum/gptchem) | 165 |
420
- |[xxw1995/chatglm3-finetune](https://github.com/xxw1995/chatglm3-finetune) | 164 |
421
- |[ArjanCodes/examples](https://github.com/ArjanCodes/examples) | 163 |
422
- |[AIAnytime/Llama2-Medical-Chatbot](https://github.com/AIAnytime/Llama2-Medical-Chatbot) | 163 |
423
- |[RCGAI/SimplyRetrieve](https://github.com/RCGAI/SimplyRetrieve) | 162 |
424
- |[langchain-ai/langchain-teacher](https://github.com/langchain-ai/langchain-teacher) | 162 |
425
- |[menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 162 |
426
- |[flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 162 |
427
- |[homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 161 |
428
- |[jiran214/langup-ai](https://github.com/jiran214/langup-ai) | 160 |
429
- |[JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 160 |
430
- |[GoogleCloudPlatform/data-analytics-golden-demo](https://github.com/GoogleCloudPlatform/data-analytics-golden-demo) | 159 |
431
- |[positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 159 |
432
- |[luisroque/large_laguage_models](https://github.com/luisroque/large_laguage_models) | 159 |
433
- |[mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 158 |
434
- |[wandb/wandbot](https://github.com/wandb/wandbot) | 158 |
435
- |[elastic/elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) | 157 |
436
- |[shroominic/funcchain](https://github.com/shroominic/funcchain) | 157 |
437
- |[deeppavlov/dream](https://github.com/deeppavlov/dream) | 156 |
438
- |[mluogh/eastworld](https://github.com/mluogh/eastworld) | 154 |
439
- |[georgesung/llm_qlora](https://github.com/georgesung/llm_qlora) | 154 |
440
- |[RUC-GSAI/YuLan-Rec](https://github.com/RUC-GSAI/YuLan-Rec) | 153 |
441
- |[KylinC/ChatFinance](https://github.com/KylinC/ChatFinance) | 152 |
442
- |[Dicklesworthstone/llama2_aided_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 152 |
443
- |[c0sogi/LLMChat](https://github.com/c0sogi/LLMChat) | 152 |
444
- |[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 152 |
445
- |[ErikBjare/gptme](https://github.com/ErikBjare/gptme) | 152 |
446
- |[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 152 |
447
- |[RoboCoachTechnologies/ROScribe](https://github.com/RoboCoachTechnologies/ROScribe) | 151 |
448
- |[Aggregate-Intellect/sherpa](https://github.com/Aggregate-Intellect/sherpa) | 151 |
449
- |[3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 151 |
450
- |[tangqiaoyu/ToolAlpaca](https://github.com/tangqiaoyu/ToolAlpaca) | 150 |
451
- |[kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 150 |
452
- |[mallahyari/drqa](https://github.com/mallahyari/drqa) | 150 |
453
- |[MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 149 |
454
- |[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |
455
- |[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |
456
- |[ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |
457
- |[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |
458
- |[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |
459
- |[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |
460
- |[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 146 |
461
- |[trancethehuman/entities-extraction-web-scraper](https://github.com/trancethehuman/entities-extraction-web-scraper) | 144 |
462
- |[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 144 |
463
- |[grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 144 |
464
- |[gh18l/CrawlGPT](https://github.com/gh18l/CrawlGPT) | 142 |
465
- |[langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 142 |
466
- |[yasyf/summ](https://github.com/yasyf/summ) | 141 |
467
- |[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 141 |
468
- |[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |
469
- |[jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 139 |
470
- |[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 139 |
471
- |[jlonge4/local_llama](https://github.com/jlonge4/local_llama) | 139 |
472
- |[smyja/blackmaria](https://github.com/smyja/blackmaria) | 138 |
473
- |[ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 137 |
474
- |[log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 137 |
475
- |[davila7/file-gpt](https://github.com/davila7/file-gpt) | 137 |
476
- |[dcaribou/transfermarkt-datasets](https://github.com/dcaribou/transfermarkt-datasets) | 136 |
477
- |[ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 135 |
478
- |[Undertone0809/promptulate](https://github.com/Undertone0809/promptulate) | 134 |
479
- |[fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 134 |
480
- |[run-llama/ai-engineer-workshop](https://github.com/run-llama/ai-engineer-workshop) | 133 |
481
- |[definitive-io/code-indexer-loop](https://github.com/definitive-io/code-indexer-loop) | 131 |
482
- |[mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 131 |
483
- |[baidubce/bce-qianfan-sdk](https://github.com/baidubce/bce-qianfan-sdk) | 130 |
484
- |[Ngonie-x/langchain_csv](https://github.com/Ngonie-x/langchain_csv) | 130 |
485
- |[IvanIsCoding/ResuLLMe](https://github.com/IvanIsCoding/ResuLLMe) | 130 |
486
- |[AnchoringAI/anchoring-ai](https://github.com/AnchoringAI/anchoring-ai) | 129 |
487
- |[Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 128 |
488
- |[athina-ai/athina-sdk](https://github.com/athina-ai/athina-sdk) | 126 |
489
- |[thunlp/ChatEval](https://github.com/thunlp/ChatEval) | 126 |
490
- |[prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 126 |
491
- |[vietanhdev/pautobot](https://github.com/vietanhdev/pautobot) | 125 |
492
- |[awslabs/generative-ai-cdk-constructs](https://github.com/awslabs/generative-ai-cdk-constructs) | 124 |
493
- |[sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 124 |
494
- |[rabbitmetrics/langchain-13-min](https://github.com/rabbitmetrics/langchain-13-min) | 124 |
495
- |[AutoLLM/AutoAgents](https://github.com/AutoLLM/AutoAgents) | 122 |
496
- |[nicknochnack/Nopenai](https://github.com/nicknochnack/Nopenai) | 122 |
497
- |[wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 122 |
498
- |[dotvignesh/PDFChat](https://github.com/dotvignesh/PDFChat) | 122 |
499
- |[topoteretes/PromethAI-Backend](https://github.com/topoteretes/PromethAI-Backend) | 121 |
500
- |[nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 121 |
501
- |[vishwasg217/finsight](https://github.com/vishwasg217/finsight) | 120 |
502
- |[snap-stanford/MLAgentBench](https://github.com/snap-stanford/MLAgentBench) | 120 |
503
- |[Azure/app-service-linux-docs](https://github.com/Azure/app-service-linux-docs) | 120 |
504
- |[nyanp/chat2plot](https://github.com/nyanp/chat2plot) | 120 |
505
- |[ant4g0nist/polar](https://github.com/ant4g0nist/polar) | 119 |
506
- |[aws-samples/cdk-eks-blueprints-patterns](https://github.com/aws-samples/cdk-eks-blueprints-patterns) | 119 |
507
- |[aws-samples/amazon-kendra-langchain-extensions](https://github.com/aws-samples/amazon-kendra-langchain-extensions) | 119 |
508
- |[Xueheng-Li/SynologyChatbotGPT](https://github.com/Xueheng-Li/SynologyChatbotGPT) | 119 |
509
- |[CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 117 |
510
- |[Lin-jun-xiang/docGPT-langchain](https://github.com/Lin-jun-xiang/docGPT-langchain) | 117 |
511
- |[ademakdogan/ChatSQL](https://github.com/ademakdogan/ChatSQL) | 116 |
512
- |[aniketmaurya/llm-inference](https://github.com/aniketmaurya/llm-inference) | 115 |
513
- |[xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 115 |
514
- |[cmooredev/RepoReader](https://github.com/cmooredev/RepoReader) | 115 |
515
- |[abi/autocommit](https://github.com/abi/autocommit) | 115 |
516
- |[MIDORIBIN/langchain-gpt4free](https://github.com/MIDORIBIN/langchain-gpt4free) | 114 |
517
- |[finaldie/auto-news](https://github.com/finaldie/auto-news) | 114 |
518
- |[Anil-matcha/Youtube-to-chatbot](https://github.com/Anil-matcha/Youtube-to-chatbot) | 114 |
519
- |[avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot) | 114 |
520
- |[Capsize-Games/airunner](https://github.com/Capsize-Games/airunner) | 113 |
521
- |[atisharma/llama_farm](https://github.com/atisharma/llama_farm) | 113 |
522
- |[mbchang/data-driven-characters](https://github.com/mbchang/data-driven-characters) | 112 |
523
- |[fiddler-labs/fiddler-auditor](https://github.com/fiddler-labs/fiddler-auditor) | 112 |
524
- |[dirkjbreeuwer/gpt-automated-web-scraper](https://github.com/dirkjbreeuwer/gpt-automated-web-scraper) | 111 |
525
- |[Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding](https://github.com/Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding) | 111 |
526
- |[hwchase17/langchain-gradio-template](https://github.com/hwchase17/langchain-gradio-template) | 111 |
527
- |[artas728/spelltest](https://github.com/artas728/spelltest) | 110 |
528
- |[NVIDIA/GenerativeAIExamples](https://github.com/NVIDIA/GenerativeAIExamples) | 109 |
529
- |[Azure/aistudio-copilot-sample](https://github.com/Azure/aistudio-copilot-sample) | 108 |
530
- |[codefuse-ai/codefuse-chatbot](https://github.com/codefuse-ai/codefuse-chatbot) | 108 |
531
- |[apirrone/Memento](https://github.com/apirrone/Memento) | 108 |
532
- |[e-johnstonn/GPT-Doc-Summarizer](https://github.com/e-johnstonn/GPT-Doc-Summarizer) | 108 |
533
- |[salesforce/BOLAA](https://github.com/salesforce/BOLAA) | 107 |
534
- |[Erol444/gpt4-openai-api](https://github.com/Erol444/gpt4-openai-api) | 106 |
535
- |[linjungz/chat-with-your-doc](https://github.com/linjungz/chat-with-your-doc) | 106 |
536
- |[crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 106 |
537
- |[panaverse/learn-generative-ai](https://github.com/panaverse/learn-generative-ai) | 105 |
538
- |[Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |
539
- |[malywut/gpt_examples](https://github.com/malywut/gpt_examples) | 105 |
540
- |[ritun16/chain-of-verification](https://github.com/ritun16/chain-of-verification) | 104 |
541
- |[langchain-ai/langchain-benchmarks](https://github.com/langchain-ai/langchain-benchmarks) | 104 |
542
- |[lightninglabs/LangChainBitcoin](https://github.com/lightninglabs/LangChainBitcoin) | 104 |
543
- |[flepied/second-brain-agent](https://github.com/flepied/second-brain-agent) | 103 |
544
- |[llmapp/openai.mini](https://github.com/llmapp/openai.mini) | 102 |
545
- |[gimlet-ai/tddGPT](https://github.com/gimlet-ai/tddGPT) | 102 |
546
- |[jlonge4/gpt_chatwithPDF](https://github.com/jlonge4/gpt_chatwithPDF) | 102 |
547
- |[agentification/RAFA_code](https://github.com/agentification/RAFA_code) | 101 |
548
- |[pacman100/DHS-LLM-Workshop](https://github.com/pacman100/DHS-LLM-Workshop) | 101 |
549
- |[aws-samples/private-llm-qa-bot](https://github.com/aws-samples/private-llm-qa-bot) | 101 |
550
-
551
-
552
- _Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)_
553
-
554
- `github-dependents-info --repo "langchain-ai/langchain" --markdownfile dependents.md --minstars 100 --sort stars`
 
 
langchain_md_files/additional_resources/tutorials.mdx DELETED
@@ -1,52 +0,0 @@
1
- # 3rd Party Tutorials
2
-
3
- ## Tutorials
4
-
5
- ### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
6
- ### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
7
- ### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
8
- ### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
9
- ### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
10
- ### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
11
- ### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
12
- ### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
13
- ### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
14
- ### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)
15
- ### [by Total Technology Zonne](https://youtube.com/playlist?list=PLI8raxzYtfGyE02fAxiM1CPhLUuqcTLWg&si=fkAye16rQKBJVHc9)
16
-
17
- ## Courses
18
-
19
- ### Featured courses on Deeplearning.AI
20
-
21
- - [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)
22
- - [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)
23
- - [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)
24
- - [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)
25
-
26
- ### Online courses
27
-
28
- - [Udemy](https://www.udemy.com/courses/search/?q=langchain)
29
- - [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)
30
- - [Pluralsight](https://www.pluralsight.com/search?q=langchain)
31
- - [Coursera](https://www.coursera.org/search?query=langchain)
32
- - [Maven](https://maven.com/courses?query=langchain)
33
- - [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
34
- - [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
35
- - [edX](https://www.edx.org/search?q=langchain)
36
- - [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)
37
-
38
- ## Short Tutorials
39
-
40
- - [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)
41
- - [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)
42
- - [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)
43
- - [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)
44
-
45
- ## Books and Handbooks
46
-
47
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
48
- - [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
49
- - [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
50
- - [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)
51
-
52
- ---------------------
 
 
langchain_md_files/additional_resources/youtube.mdx DELETED
@@ -1,63 +0,0 @@
1
- # YouTube videos
2
-
3
- [Updated 2024-05-16]
4
-
5
- ### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)
6
-
7
- ### [Tutorials on YouTube](/docs/additional_resources/tutorials/#tutorials)
8
-
9
- ## Videos (sorted by views)
10
-
11
- Only videos with 40K+ views:
12
-
13
- - [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)
14
- - [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)
15
- - [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)
16
- - [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)
17
- - [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)
18
- - [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)
- - [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)
- - [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)
- - [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)
- - [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)
- - [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)
- - [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)
- - [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)
- - [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)
- - [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)
- - [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)
- - [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)
- - [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)
- - [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)
- - [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)
- - [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)
- - [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)
- - [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)
- - [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)
- - [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)
- - [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit` | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)
- - [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)
- - [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)
- - [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)
- - [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)
- - [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)
- - [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)
- - [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)
- - [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)
- - [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)
- - [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)
- - [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)
- - [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)
- - [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)
- - [Private GPT, free deployment! `Langchain-Chatchat` helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)
- - [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)
- - [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)
- - [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)
- - [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)
- - [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)
- - [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)
- - [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)
- - [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)
-
- ---------------------
- [Updated 2024-05-16]
 
langchain_md_files/changes/changelog/core.mdx DELETED
@@ -1,10 +0,0 @@
- # langchain-core
-
- ## 0.1.x
-
- #### Deprecated
-
- - `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead.
- - `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead.
- - `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
- - `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead.
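A minimal migration sketch for these deprecations, assuming langchain-core 0.1.x with `langchain-openai` installed; `ChatOpenAI` is illustrative, since any `BaseChatModel` exposes the same `Runnable` interface:

```python
# Migration sketch (assumes langchain-core 0.1.x and langchain-openai;
# ChatOpenAI is illustrative -- any BaseChatModel behaves the same way).
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

chat = ChatOpenAI()

# Deprecated, removed in 0.2.0:
#   chat.predict("Tell me a joke")
#   chat.predict_messages([HumanMessage(content="Tell me a joke")])

# Replacement: invoke() accepts a string or a list of messages
# and returns an AIMessage.
reply = chat.invoke([HumanMessage(content="Tell me a joke")])
print(reply.content)

# Async code replaces apredict / apredict_messages with ainvoke:
#   await chat.ainvoke("Tell me a joke")
```

The same `invoke`/`ainvoke` pattern applies to `BaseLLM`, so one calling convention covers both completion and chat models.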
 
langchain_md_files/changes/changelog/langchain.mdx DELETED
@@ -1,93 +0,0 @@
- # langchain
-
- ## 0.2.0
-
- ### Deleted
-
- As of release 0.2.0, `langchain` is required to be integration-agnostic: by default, code in `langchain` should not instantiate any specific chat model, LLM, embedding model, vector store, etc.; the user is required to specify those explicitly.
-
- The following functions and classes require an explicit LLM to be passed as an argument (see the sketch after this section):
-
- - `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
- - `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
- - `langchain.chains.openai_functions.get_openapi_chain`
- - `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
- - `langchain.indexes.VectorStoreIndexWrapper.query`
- - `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
- - `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
- - `langchain.chains.flare.FlareChain`
-
- The following classes now require passing an explicit Embedding model as an argument:
-
- - `langchain.indexes.VectorstoreIndexCreator`
-
- The following code has been removed:
-
- - `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
-
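A minimal sketch of the new explicit-arguments style, assuming langchain 0.2.x with `langchain-openai` and `langchain-community` installed; the loader, embedding, and model choices below are illustrative placeholders, not requirements:

```python
# Sketch only: assumes langchain 0.2.x plus langchain-openai / langchain-community.
from langchain_community.document_loaders import TextLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator

loader = TextLoader("notes.txt")  # any document loader works here

# 0.2.0: the embedding model must be passed explicitly (depending on version,
# the default vector store may additionally require the chromadb package)...
index = VectorstoreIndexCreator(embedding=OpenAIEmbeddings()).from_loaders([loader])

# ...and the LLM used to answer queries is explicit as well.
print(index.query("What do the notes say?", llm=ChatOpenAI()))
```

Requiring the embedding model and LLM at the call site is what keeps `langchain` itself free of any default integration.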
- ### Deprecated
-
- We have two main types of deprecations:
-
- 1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
-
- If you try to import it from `langchain`, the import will keep working but will raise a deprecation warning. The warning provides a replacement import statement.
-
- ```bash
- python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
- ```
-
- ```text
- LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
-
- >> from langchain.document_loaders import UnstructuredMarkdownLoader
-
- with new imports of:
-
- >> from langchain_community.document_loaders import UnstructuredMarkdownLoader
- ```
-
- We will continue supporting the imports in `langchain` until release 0.4, as long as the relevant package where the code lives is installed (e.g., as long as `langchain_community` is installed).
-
- However, we advise users not to rely on these imports; instead, migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
-
- 2. Code that has better alternatives available and will eventually be removed, so there's only a single way to do things (e.g., the `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
-
- Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
-
- ## 0.1.0 (Jan 5, 2024)
-
- ### Deleted
-
- No deletions.
-
- ### Deprecated
-
- Deprecated classes and methods will be removed in 0.2.0:
-
- | Deprecated                      | Alternative                       | Reason                                          |
- |---------------------------------|-----------------------------------|-------------------------------------------------|
- | ChatVectorDBChain               | ConversationalRetrievalChain      | More general to all retrievers                  |
- | create_ernie_fn_chain           | create_ernie_fn_runnable          | Use LCEL under the hood                         |
- | created_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood                         |
- | NatBotChain                     |                                   | Not used                                        |
- | create_openai_fn_chain          | create_openai_fn_runnable         | Use LCEL under the hood                         |
- | create_structured_output_chain  | create_structured_output_runnable | Use LCEL under the hood                         |
- | load_query_constructor_chain    | load_query_constructor_runnable   | Use LCEL under the hood                         |
- | VectorDBQA                      | RetrievalQA                       | More general to all retrievers                  |
- | SequentialChain                 | LCEL                              | Obviated by LCEL                                |
- | SimpleSequentialChain           | LCEL                              | Obviated by LCEL                                |
- | TransformChain                  | LCEL/RunnableLambda               | Obviated by LCEL                                |
- | create_tagging_chain            | create_structured_output_runnable | Use LCEL under the hood                         |
- | ChatAgent                       | create_react_agent                | Use LCEL builder over a class                   |
- | ConversationalAgent             | create_react_agent                | Use LCEL builder over a class                   |
- | ConversationalChatAgent         | create_json_chat_agent            | Use LCEL builder over a class                   |
- | initialize_agent                | Individual create agent methods   | Individual create agent methods are more clear  |
- | ZeroShotAgent                   | create_react_agent                | Use LCEL builder over a class                   |
- | OpenAIFunctionsAgent            | create_openai_functions_agent     | Use LCEL builder over a class                   |
- | OpenAIMultiFunctionsAgent       | create_openai_tools_agent         | Use LCEL builder over a class                   |
- | SelfAskWithSearchAgent          | create_self_ask_with_search       | Use LCEL builder over a class                   |
- | StructuredChatAgent             | create_structured_chat_agent      | Use LCEL builder over a class                   |
- | XMLAgent                        | create_xml_agent                  | Use LCEL builder over a class                   |
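
To make the "Use LCEL builder over a class" rows concrete, here is a minimal sketch of moving from `initialize_agent` to `create_react_agent`, assuming langchain 0.1+ with `langchain-openai`, `langchainhub`, and `duckduckgo-search` installed; the model and tool are illustrative placeholders:

```python
# Sketch of the class-to-builder migration (assumes langchain 0.1+,
# langchain-openai, langchainhub, duckduckgo-search; choices are illustrative).
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0)
tools = [DuckDuckGoSearchRun()]

# Before (deprecated):
#   from langchain.agents import initialize_agent
#   agent = initialize_agent(tools, llm, agent="zero-shot-react-description")

# After: build the agent explicitly from an LLM, tools, and a prompt,
# then wrap it in an AgentExecutor.
prompt = hub.pull("hwchase17/react")  # a published ReAct prompt
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "What is LangChain?"})
```

The builder makes the prompt, LLM, and tools explicit instead of hiding them inside an agent class, which matches the table's stated rationale for deprecating the class-based agents.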