your-github-username committed on
Commit 4bbb911 · 1 Parent(s): eca622b

Auto-update from GitHub Actions

Files changed (31)
  1. README.md +2 -12
  2. app.py +1 -1
  3. hf_space/hf_space/README.md +3 -3
  4. hf_space/hf_space/hf_space/app.py +9 -6
  5. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +1 -5
  6. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +166 -1
  7. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/.gitattributes +35 -0
  8. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/README.md +14 -0
  9. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/app.py +64 -0
  10. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/requirements.txt +1 -0
  11. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/deploy.yml +26 -0
  12. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitignore +171 -0
  13. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/LICENSE +21 -0
  14. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +1 -14
  15. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/ai_research_buddy/__init__.py +0 -0
  16. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/ai_research_buddy/research_buddy.py +35 -0
  17. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +146 -0
  18. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/config.yaml +6 -0
  19. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitattributes +35 -0
  20. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +14 -0
  21. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt +10 -0
  22. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/research_data.db +0 -0
  23. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/setup.py +29 -0
  24. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/AI_agent.py +141 -0
  25. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/RAG.py +33 -0
  26. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/__init__.py +0 -0
  27. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/data_ingestion.py +32 -0
  28. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/logger.py +13 -0
  29. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/query_enhancer.py +36 -0
  30. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/test.ipynb +286 -0
  31. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt +2 -1
README.md CHANGED
@@ -1,14 +1,3 @@
1
- ---
2
- license: apache-2.0
3
- title: AI_Research_Buddy
4
- sdk: gradio
5
- emoji: 📚
6
- colorFrom: green
7
- colorTo: blue
8
- pinned: true
9
- short_description: Your Research Sidekick
10
- sdk_version: 5.18.0
11
- ---
12
  # AI Research Buddy: Your Conversational RAG Sidekick
13
 
14
  ![Image](https://github.com/user-attachments/assets/3e8af607-202c-4c1e-a634-a12c8b3733e5)
@@ -169,4 +158,5 @@ MIT License—feel free to fork, tweak, and share!
169
  ---
170
 
171
  **Created by Nithin | February 24, 2025**
172
- [GitHub](https://github.com/Nithin8919) | [Hugging Face](https://huggingface.co/Nithin89)
1
  # AI Research Buddy: Your Conversational RAG Sidekick
2
 
3
  ![Image](https://github.com/user-attachments/assets/3e8af607-202c-4c1e-a634-a12c8b3733e5)
 
158
  ---
159
 
160
  **Created by Nithin | February 24, 2025**
161
+ [GitHub](https://github.com/Nithin8919) | [Hugging Face](https://huggingface.co/Nithin89)
162
+
app.py CHANGED
@@ -146,4 +146,4 @@ demo = gr.Interface(
146
  )
147
 
148
  if __name__ == "__main__":
149
- demo.launch()
 
146
  )
147
 
148
  if __name__ == "__main__":
149
+ demo.launch(share=True)
hf_space/hf_space/README.md CHANGED
@@ -1,9 +1,9 @@
1
  # AI Research Buddy: Your Conversational RAG Sidekick
2
 
3
- ![Image](https://github.com/user-attachments/assets/fa4f6ef8-029c-46a2-9984-882147c151e5)
4
  *Unleash the power of research with a twist of AI magic!*
5
 
6
- Welcome to **AI Research Buddy**, a slick, conversational app that dives into the vast ocean of arXiv papers to fetch, summarize, and source the best research on any topic you throw at it—like "RAG" (yep, it’s meta enough to research itself!). Built from the ground up by **Nithin** (that’s me!), this project is a living testament to my mastery of **Retrieval-Augmented Generation (RAG)** and **agents**, blending advanced retrieval tricks with a chatty AI that’s always ready to dig deeper. Whether you’re a student, researcher, or just a curious mind, this buddy’s got your back—all running on a humble MacBook M3 Pro with 8GB RAM as of February 24, 2025!
7
 
8
  ---
9
 
@@ -33,7 +33,7 @@ This isn’t just a demo—it’s proof that RAG and agents can team up to turn
33
  - **Conversational Memory**: Tracks history with `ConversationBufferMemory`, adapting to follow-ups like "Tell me more."
34
  - **Decision-Making**: Smartly chooses the best papers and adjusts responses based on your input.
35
 
36
- - **Lightweight Design**: Runs smoothly on 8GB RAM (~700MB footprint), optimized for my MacBook M3 Pro.
37
 
38
  ---
39
 
 
1
  # AI Research Buddy: Your Conversational RAG Sidekick
2
 
3
+ ![Image](https://github.com/user-attachments/assets/3e8af607-202c-4c1e-a634-a12c8b3733e5)
4
  *Unleash the power of research with a twist of AI magic!*
5
 
6
+ Welcome to **AI Research Buddy**, a slick, conversational app that dives into the vast ocean of arXiv papers to fetch, summarize, and source the best research on any topic you throw at it—like "RAG" (yep, it’s meta enough to research itself!). Built from the ground up by **Nithin** (that’s me!), this project is a living testament to my mastery of **Retrieval-Augmented Generation (RAG)** and **agents**, blending advanced retrieval tricks with a chatty AI that’s always ready to dig deeper. Whether you’re a student, researcher, or just a curious mind, this buddy’s got your back—all running live on Hugging Face Spaces!
7
 
8
  ---
9
 
 
33
  - **Conversational Memory**: Tracks history with `ConversationBufferMemory`, adapting to follow-ups like "Tell me more."
34
  - **Decision-Making**: Smartly chooses the best papers and adjusts responses based on your input.
35
 
36
+ - **Lightweight Design**: Runs smoothly on 8GB RAM (~700MB footprint).
37
 
38
  ---
39
 
hf_space/hf_space/hf_space/app.py CHANGED
@@ -7,6 +7,7 @@ from langchain.memory import ConversationBufferMemory
7
  from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
8
  from loguru import logger
9
  import numpy as np
 
10
 
11
  # --- DataIngestion Class with Query Expansion ---
12
  class DataIngestion:
@@ -51,7 +52,7 @@ class DataIngestion:
51
 
52
  # --- RetrievalModule Class with Reranking ---
53
  class RetrievalModule:
54
- def __init__(self, embedding_model="all-MiniLM-L6-v2", persist_dir="./chroma_db"):
55
  self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
56
  self.vector_store = None
57
  self.persist_dir = persist_dir
@@ -67,14 +68,15 @@ class RetrievalModule:
67
  texts=abstracts, embedding=self.embeddings, metadatas=metadatas, persist_directory=self.persist_dir
68
  )
69
  self.vector_store.persist()
70
- logger.info("Chroma vector store built.")
71
 
72
  def rerank(self, query, retrieved):
73
  if not retrieved:
74
  return retrieved
75
  inputs = [f"{query} [SEP] {doc[0]}" for doc in retrieved]
76
  tokenized = self.reranker_tokenizer(inputs, return_tensors="pt", padding=True, truncation=True, max_length=512)
77
- scores = self.reranker_model(**tokenized).logits.squeeze().detach().numpy()
 
78
  ranked_indices = np.argsort(scores)[::-1]
79
  return [retrieved[i] for i in ranked_indices[:3]]
80
 
@@ -100,7 +102,7 @@ def process_query(query):
100
  # Check chat history for follow-up context
101
  history = memory.load_memory_variables({})["chat_history"]
102
  if history and "more" in query.lower():
103
- last_output = history[-1]["content"] if history else ""
104
  context = "\n".join([line for line in last_output.split("\n") if "Summary" in line])
105
  else:
106
  # Fetch and retrieve papers for new query
@@ -139,8 +141,9 @@ demo = gr.Interface(
139
  fn=process_query,
140
  inputs=gr.Textbox(label="Enter your research query (e.g., 'RAG' or 'Tell me more')"),
141
  outputs=gr.Textbox(label="Result"),
142
- title="Conversational RAG Demo",
143
  description="Retrieve summaries of the best papers on your topic with their sources. Ask follow-ups like 'Tell me more.'"
144
  )
145
 
146
- demo.launch()
 
 
7
  from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
8
  from loguru import logger
9
  import numpy as np
10
+ import torch
11
 
12
  # --- DataIngestion Class with Query Expansion ---
13
  class DataIngestion:
 
52
 
53
  # --- RetrievalModule Class with Reranking ---
54
  class RetrievalModule:
55
+ def __init__(self, embedding_model="sentence-transformers/all-MiniLM-L6-v2", persist_dir="./chroma_db"):
56
  self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
57
  self.vector_store = None
58
  self.persist_dir = persist_dir
 
68
  texts=abstracts, embedding=self.embeddings, metadatas=metadatas, persist_directory=self.persist_dir
69
  )
70
  self.vector_store.persist()
71
+ logger.info("Chroma vector store built and persisted.")
72
 
73
  def rerank(self, query, retrieved):
74
  if not retrieved:
75
  return retrieved
76
  inputs = [f"{query} [SEP] {doc[0]}" for doc in retrieved]
77
  tokenized = self.reranker_tokenizer(inputs, return_tensors="pt", padding=True, truncation=True, max_length=512)
78
+ with torch.no_grad():
79
+ scores = self.reranker_model(**tokenized).logits.squeeze().detach().numpy()
80
  ranked_indices = np.argsort(scores)[::-1]
81
  return [retrieved[i] for i in ranked_indices[:3]]
82
 
 
102
  # Check chat history for follow-up context
103
  history = memory.load_memory_variables({})["chat_history"]
104
  if history and "more" in query.lower():
105
+ last_output = history[-1].content if history else "" # Fixed AIMessage access
106
  context = "\n".join([line for line in last_output.split("\n") if "Summary" in line])
107
  else:
108
  # Fetch and retrieve papers for new query
 
141
  fn=process_query,
142
  inputs=gr.Textbox(label="Enter your research query (e.g., 'RAG' or 'Tell me more')"),
143
  outputs=gr.Textbox(label="Result"),
144
+ title="AI Research Buddy",
145
  description="Retrieve summaries of the best papers on your topic with their sources. Ask follow-ups like 'Tell me more.'"
146
  )
147
 
148
+ if __name__ == "__main__":
149
+ demo.launch()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md CHANGED
@@ -1,10 +1,6 @@
1
- # AI_Research_BuddyBelow is a polished, comprehensive, and engaging **README.md** file for your **AI_Research_Buddy** project. This documentation is designed to showcase your expertise in **Retrieval-Augmented Generation (RAG)** and **agents**, explain the project’s purpose and functionality, and provide clear instructions for setup and usage. It’s professional yet approachable, perfect for your GitHub repo (`https://github.com/Nithin8919/AI_Research_Buddy`) and Hugging Face Space (`https://huggingface.co/spaces/Nithin89/AI_Reaserch_Buddy` or corrected version).
2
-
3
- ---
4
-
5
  # AI Research Buddy: Your Conversational RAG Sidekick
6
 
7
- ![AI Research Buddy Logo](https://via.placeholder.com/150?text=AI+Research+Buddy)
8
  *Unleash the power of research with a twist of AI magic!*
9
 
10
  Welcome to **AI Research Buddy**, a slick, conversational app that dives into the vast ocean of arXiv papers to fetch, summarize, and source the best research on any topic you throw at it—like "RAG" (yep, it’s meta enough to research itself!). Built from the ground up by **Nithin** (that’s me!), this project is a living testament to my mastery of **Retrieval-Augmented Generation (RAG)** and **agents**, blending advanced retrieval tricks with a chatty AI that’s always ready to dig deeper. Whether you’re a student, researcher, or just a curious mind, this buddy’s got your back—all running on a humble MacBook M3 Pro with 8GB RAM as of February 24, 2025!
 
1
  # AI Research Buddy: Your Conversational RAG Sidekick
2
 
3
+ ![Image](https://github.com/user-attachments/assets/fa4f6ef8-029c-46a2-9984-882147c151e5)
4
  *Unleash the power of research with a twist of AI magic!*
5
 
6
  Welcome to **AI Research Buddy**, a slick, conversational app that dives into the vast ocean of arXiv papers to fetch, summarize, and source the best research on any topic you throw at it—like "RAG" (yep, it’s meta enough to research itself!). Built from the ground up by **Nithin** (that’s me!), this project is a living testament to my mastery of **Retrieval-Augmented Generation (RAG)** and **agents**, blending advanced retrieval tricks with a chatty AI that’s always ready to dig deeper. Whether you’re a student, researcher, or just a curious mind, this buddy’s got your back—all running on a humble MacBook M3 Pro with 8GB RAM as of February 24, 2025!
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md CHANGED
@@ -1 +1,166 @@
1
- # AI_Research_Buddy
1
+ # AI_Research_Buddy
+
+ Below is a polished, comprehensive, and engaging **README.md** file for your **AI_Research_Buddy** project. This documentation is designed to showcase your expertise in **Retrieval-Augmented Generation (RAG)** and **agents**, explain the project’s purpose and functionality, and provide clear instructions for setup and usage. It’s professional yet approachable, perfect for your GitHub repo (`https://github.com/Nithin8919/AI_Research_Buddy`) and Hugging Face Space (`https://huggingface.co/spaces/Nithin89/AI_Reaserch_Buddy` or corrected version).
2
+
3
+ ---
4
+
5
+ # AI Research Buddy: Your Conversational RAG Sidekick
6
+
7
+ ![AI Research Buddy Logo](https://via.placeholder.com/150?text=AI+Research+Buddy)
8
+ *Unleash the power of research with a twist of AI magic!*
9
+
10
+ Welcome to **AI Research Buddy**, a slick, conversational app that dives into the vast ocean of arXiv papers to fetch, summarize, and source the best research on any topic you throw at it—like "RAG" (yep, it’s meta enough to research itself!). Built from the ground up by **Nithin** (that’s me!), this project is a living testament to my mastery of **Retrieval-Augmented Generation (RAG)** and **agents**, blending advanced retrieval tricks with a chatty AI that’s always ready to dig deeper. Whether you’re a student, researcher, or just a curious mind, this buddy’s got your back—all running on a humble MacBook M3 Pro with 8GB RAM as of February 24, 2025!
11
+
12
+ ---
13
+
14
+ ## 🚀 What’s This All About?
15
+
16
+ AI Research Buddy isn’t just another research tool—it’s a **conversational RAG agent** with a mission: to make exploring academic papers fast, fun, and insightful. Here’s the gist:
17
+
18
+ - **Ask Anything**: Type a topic (e.g., "RAG") or a follow-up (e.g., "Tell me more"), and watch it work its magic.
19
+ - **Smart Retrieval**: It grabs papers from arXiv, expands your query (think "RAG" → "Retrieval-Augmented Generation"), and reranks them to spotlight the best.
20
+ - **Snappy Summaries**: Powered by `distilgpt2`, it crafts concise summaries of the top papers, serving up knowledge in bite-sized chunks.
21
+ - **Sources Included**: Every summary comes with clickable arXiv links, so you can dive into the originals.
22
+ - **Chatty Agent**: Ask for more, and it’ll refine the story using what it’s already found—no extra digging required.
23
+
24
+ This isn’t just a demo—it’s proof that RAG and agents can team up to turn research into a conversation, all while keeping things light on your hardware.
25
+
26
+ ---
27
+
28
+ ## 🌟 Features That Shine
29
+
30
+ - **Advanced RAG Magic**:
31
+ - **Query Expansion**: Boosts recall by adding synonyms (e.g., "AI" → "Artificial Intelligence"); see the sketch after this list.
32
+ - **Reranking**: Uses a cross-encoder (`cross-encoder/ms-marco-MiniLM-L-6-v2`) to pick the top 3 papers with precision.
33
+ - **Generation**: Summarizes with `distilgpt2`, generating up to 100 new tokens for crisp, relevant outputs.
34
+
35
+ - **Agent Awesomeness**:
36
+ - **Autonomy**: Fetches, ranks, and summarizes papers without hand-holding.
37
+ - **Conversational Memory**: Tracks history with `ConversationBufferMemory`, adapting to follow-ups like "Tell me more."
38
+ - **Decision-Making**: Smartly chooses the best papers and adjusts responses based on your input.
39
+
40
+ - **Lightweight Design**: Runs smoothly on 8GB RAM (~700MB footprint), optimized for my MacBook M3 Pro.
41
+
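The expansion and reranking steps above are small enough to sketch. Here is a condensed version of the approach, mirroring the `DataIngestion.expand_query` and `RetrievalModule.rerank` methods in this commit's `app.py` (the `SYNONYMS` table and function names are illustrative, not the exact code):

```python
import numpy as np
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Illustrative synonym table; app.py keeps an equivalent dict on DataIngestion.
SYNONYMS = {"RAG": "Retrieval-Augmented Generation", "AI": "Artificial Intelligence"}

def expand_query(query: str) -> str:
    # Append known long forms so the arXiv search matches both spellings.
    expanded = query
    for short, long_form in SYNONYMS.items():
        if short.lower() in query.lower():
            expanded += f" OR {long_form}"
    return expanded

# Cross-encoder reranking: score each (query, abstract) pair jointly.
MODEL_ID = "cross-encoder/ms-marco-MiniLM-L-6-v2"
reranker = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

def rerank(query: str, abstracts: list[str], top_k: int = 3) -> list[str]:
    inputs = tokenizer([f"{query} [SEP] {a}" for a in abstracts],
                       return_tensors="pt", padding=True, truncation=True, max_length=512)
    with torch.no_grad():  # inference only; matches the fix applied later in this commit
        scores = reranker(**inputs).logits.squeeze(-1).numpy()
    order = np.argsort(scores)[::-1]  # highest score first
    return [abstracts[i] for i in order[:top_k]]
```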
42
+ ---
43
+
44
+ ## 🎉 Try It Out!
45
+
46
+ Hosted live on [Hugging Face Spaces](https://huggingface.co/spaces/Nithin89/AI_Reaserch_Buddy), AI Research Buddy is ready to roll! Here’s what you’ll see:
47
+
48
+ - **Input**: "RAG"
49
+ ```
50
+ 📜 **Summary of Best Papers on RAG:**
51
+ Retrieval-Augmented Generation (RAG) enhances language models by integrating external knowledge retrieval, improving performance on knowledge-intensive tasks. Research highlights modular frameworks and benchmarks.
52
+
53
+ **Sources:**
54
+ - Modular RAG: Transforming RAG Systems ([link](https://export.arxiv.org/abs/2407.21059v1))
55
+ - ARAGOG: Advanced RAG Output Grading ([link](https://export.arxiv.org/abs/2404.01037v1))
56
+ - CRAG -- Comprehensive RAG Benchmark ([link](https://export.arxiv.org/abs/2406.04744v2))
57
+ ```
58
+
59
+ - **Follow-Up**: "Tell me more"
60
+ ```
61
+ 📜 **More on RAG:**
62
+ Modular RAG offers reconfigurable frameworks, while CRAG benchmarks evaluate real-world QA, advancing RAG applications.
63
+ ```
64
+
65
+ ---
66
+
67
+ ## 🛠️ How It Works
68
+
69
+ Here’s the techy breakdown:
70
+
71
+ 1. **Retrieval**:
72
+ - **DataIngestion**: Fetches up to 5 papers from arXiv with an expanded query (`ti:{query} OR ab:{query}`).
73
+ - **RetrievalModule**: Builds a Chroma vector store with `all-MiniLM-L6-v2` embeddings, retrieves 5 papers, and reranks to the top 3 using a cross-encoder.
74
+
75
+ 2. **Generation**:
76
+ - Combines retrieved abstracts into a prompt, then uses `distilgpt2` to generate a 100-token summary.
77
+
78
+ 3. **Agent Behavior**:
79
+ - `ConversationBufferMemory` tracks chat history, reusing context for follow-ups.
80
+ - Adapts output based on whether it’s a new query or a deeper dive.
81
+
82
+ 4. **Output**:
83
+ - New queries get summaries + sources; follow-ups refine the summary.
84
+
85
+ All this runs in a Gradio app, deployed to Hugging Face Spaces for the world to see!
86
+
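The follow-up handling in step 3 reduces to a few lines. Here is a minimal sketch of the memory logic, using `ConversationBufferMemory` the same way `process_query` does in this commit's `app.py` (`fetch_and_summarize` is a hypothetical stand-in for the retrieval and generation steps above):

```python
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

def fetch_and_summarize(query: str) -> str:
    # Hypothetical stand-in for steps 1-2: arXiv fetch, rerank, distilgpt2 summary.
    return f"Summary of best papers on {query}: ..."

def answer(query: str) -> str:
    history = memory.load_memory_variables({})["chat_history"]
    if history and "more" in query.lower():
        # Follow-up: reuse the last stored answer instead of re-fetching papers.
        context = history[-1].content  # with return_messages=True, entries are message objects
    else:
        context = fetch_and_summarize(query)
    memory.save_context({"input": query}, {"output": context})
    return context
```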
87
+ ---
88
+
89
+ ## 📦 Setup and Installation
90
+
91
+ Want to run it locally or tweak it? Here’s how:
92
+
93
+ ### **Prerequisites**
94
+ - Python 3.8+
95
+ - Git installed
96
+ - ~1GB free disk space (for models)
97
+
98
+ ### **Steps**
99
+ 1. **Clone the Repo**:
100
+ ```bash
101
+ git clone https://github.com/Nithin8919/AI_Research_Buddy.git
102
+ cd AI_Research_Buddy
103
+ ```
104
+
105
+ 2. **Install Dependencies**:
106
+ ```bash
107
+ pip install -r requirements.txt
108
+ ```
109
+ *requirements.txt*:
110
+ ```
111
+ gradio
112
+ requests
113
+ langchain
114
+ langchain-community
115
+ transformers
116
+ huggingface-hub
117
+ loguru
118
+ numpy
119
+ torch
120
+ ```
121
+
122
+ 3. **Run It**:
123
+ ```bash
124
+ python app.py
125
+ ```
126
+ - Opens at `http://127.0.0.1:7860`.
127
+ - Add `share=True` to `demo.launch()` for a temporary public URL.
128
+
129
+ 4. **Test**: Try "RAG" and "Tell me more" in the browser.
130
+
131
+ ---
132
+
133
+ ## 🌍 Deployment
134
+
135
+ Live at [Hugging Face Spaces](https://huggingface.co/spaces/Nithin89/AI_Reaserch_Buddy)! To deploy your own:
136
+
137
+ 1. **Push to HF Space**:
138
+ - Add your Space as a remote:
139
+ ```bash
140
+ git remote add space https://Nithin89:<HF_TOKEN>@huggingface.co/spaces/Nithin89/AI_Reaserch_Buddy
141
+ git push space main --force
142
+ ```
143
+ - Replace `<HF_TOKEN>` with your Hugging Face token.
144
+
145
+ 2. **Build**: HF auto-builds from `app.py` and `requirements.txt`.
146
+
147
+ ---
148
+
149
+ ## 💡 Why It’s Awesome
150
+
151
+ - **RAG Mastery**: Shows off query expansion, reranking, and generation—core RAG skills.
152
+ - **Agent Vibes**: Conversational, autonomous, and adaptive, proving I get agents.
153
+ - **Lean & Mean**: Runs on 8GB RAM, a testament to efficient design.
154
+ - **Fun Factor**: Research doesn’t have to be dull—this buddy’s got personality!
155
+
156
+ ---
157
+
158
+ ## 📜 License
159
+
160
+ MIT License—feel free to fork, tweak, and share!
161
+
162
+ ---
163
+
164
+ **Created by Nithin | February 24, 2025**
165
+ [GitHub](https://github.com/Nithin8919) | [Hugging Face](https://huggingface.co/Nithin89)
166
+
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/README.md ADDED
@@ -0,0 +1,14 @@
1
+ ---
2
+ title: AI Reaserch Buddy
3
+ emoji: 💬
4
+ colorFrom: yellow
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 5.0.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ short_description: Your Research Sidekick
12
+ ---
13
+
14
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/app.py ADDED
@@ -0,0 +1,64 @@
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
+
9
+
10
+ def respond(
11
+ message,
12
+ history: list[tuple[str, str]],
13
+ system_message,
14
+ max_tokens,
15
+ temperature,
16
+ top_p,
17
+ ):
18
+ messages = [{"role": "system", "content": system_message}]
19
+
20
+ for val in history:
21
+ if val[0]:
22
+ messages.append({"role": "user", "content": val[0]})
23
+ if val[1]:
24
+ messages.append({"role": "assistant", "content": val[1]})
25
+
26
+ messages.append({"role": "user", "content": message})
27
+
28
+ response = ""
29
+
30
+ for message in client.chat_completion(
31
+ messages,
32
+ max_tokens=max_tokens,
33
+ stream=True,
34
+ temperature=temperature,
35
+ top_p=top_p,
36
+ ):
37
+ token = message.choices[0].delta.content
38
+
39
+ response += token
40
+ yield response
41
+
42
+
43
+ """
44
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
+ """
46
+ demo = gr.ChatInterface(
47
+ respond,
48
+ additional_inputs=[
49
+ gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
+ gr.Slider(
53
+ minimum=0.1,
54
+ maximum=1.0,
55
+ value=0.95,
56
+ step=0.05,
57
+ label="Top-p (nucleus sampling)",
58
+ ),
59
+ ],
60
+ )
61
+
62
+
63
+ if __name__ == "__main__":
64
+ demo.launch()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/AI_Reaserch_Buddy/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ huggingface_hub==0.25.2
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/deploy.yml ADDED
@@ -0,0 +1,26 @@
1
+ name: Deploy to Hugging Face Space
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+
8
+ jobs:
9
+ deploy:
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - name: Checkout Repository
13
+ uses: actions/checkout@v3
14
+
15
+ - name: Push to Hugging Face
16
+ env:
17
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
18
+ run: |
19
+ git config --global user.email "[email protected]"
20
+ git config --global user.name "your-github-username"
21
+ git clone https://user:[email protected]/spaces/Nithin89/AI_Research_Buddy hf_space
22
+ rsync -av --exclude '.git' . hf_space/
23
+ cd hf_space
24
+ git add .
25
+ git commit -m "Auto-update from GitHub Actions" || echo "No changes to commit"
26
+ git push origin main
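A note on this workflow: `rsync -av --exclude '.git' . hf_space/` copies the whole working tree into the freshly cloned `hf_space/` directory, but that clone lives inside the working tree, so each run nests the previous copy one level deeper. That is the likely origin of the deeply nested `hf_space/hf_space/...` paths in this commit; adding `--exclude 'hf_space'` to the rsync call should stop the recursion.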
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitignore ADDED
@@ -0,0 +1,171 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+ chroma_db/
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # PyPI configuration file
171
+ .pypirc
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Nithin8919
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md CHANGED
@@ -1,14 +1 @@
1
- ---
2
- title: AI Research Buddy
3
- emoji: 🔥
4
- colorFrom: indigo
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 5.17.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- short_description: Your Research Sidekick
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # AI_Research_Buddy
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/ai_research_buddy/__init__.py ADDED
File without changes
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/ai_research_buddy/research_buddy.py ADDED
@@ -0,0 +1,35 @@
1
+ import random
2
+ from src.data_ingestion import DataIngestion
3
+ from src.RAG import RetrievalModule
4
+ from src.AI_agent import ResearchAgent
5
+ from src.logger import logger
6
+
7
+ class ResearchBuddyPipeline:
8
+ def __init__(self, config):
9
+ self.config = config
10
+ self.ingestor = DataIngestion(self.config["api_url"])
11
+ self.retriever = RetrievalModule(self.config["embedding_model"], self.config["persist_dir"])
12
+ self.agent = ResearchAgent(self.config["summarizer_model"])
13
+ self.openers = [
14
+ "Hold my coffee, I’m diving into this!",
15
+ "Time to unleash my inner paper monster!",
16
+ "Buckle up, we’re raiding the research jungle!",
17
+ "Let’s crank this up to elevenβ€”here we go!"
18
+ ]
19
+
20
+ def process_query(self, topic, query):
21
+ opener = random.choice(self.openers)
22
+ logger.info(f"Processing query for topic: {topic}")
23
+
24
+ titles, abstracts = self.ingestor.fetch_papers(topic, self.config["max_results"])
25
+ if not abstracts:
26
+ return f"{opener}\n\nNo research found for '{topic}'. Try a different topic?"
27
+
28
+ summaries = self.agent.summarize_papers(abstracts)
29
+ self.retriever.build_vector_store(summaries)
30
+ relevant_papers = self.retriever.retrieve_relevant(query, k=self.config["top_k"])
31
+
32
+ if not relevant_papers:
33
+ return f"{opener}\n\nNo relevant results for '{query}'. Try refining your query?"
34
+
35
+ return f"{opener}\n\n" + self.agent.chat_response(None, relevant_papers, topic, query)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py ADDED
@@ -0,0 +1,146 @@
1
+ import gradio as gr
2
+ import requests
3
+ import xml.etree.ElementTree as ET
4
+ from langchain.vectorstores import Chroma
5
+ from langchain.embeddings import HuggingFaceEmbeddings
6
+ from langchain.memory import ConversationBufferMemory
7
+ from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
8
+ from loguru import logger
9
+ import numpy as np
10
+
11
+ # --- DataIngestion Class with Query Expansion ---
12
+ class DataIngestion:
13
+ def __init__(self, api_url="http://export.arxiv.org/api/query"):
14
+ self.api_url = api_url
15
+ self.synonyms = {
16
+ "RAG": "Retrieval-Augmented Generation",
17
+ "AI": "Artificial Intelligence",
18
+ "ML": "Machine Learning"
19
+ }
20
+
21
+ def expand_query(self, query):
22
+ expanded = query
23
+ for key, value in self.synonyms.items():
24
+ if key.lower() in query.lower():
25
+ expanded += f" OR {value}"
26
+ logger.info(f"Expanded query: {expanded}")
27
+ return expanded
28
+
29
+ def fetch_papers(self, topic, max_results=5):
30
+ expanded_query = self.expand_query(topic)
31
+ url = f"{self.api_url}?search_query=ti:{expanded_query}+OR+ab:{expanded_query}&start=0&max_results={max_results}"
32
+ logger.info(f"Fetching papers from: {url}")
33
+ try:
34
+ response = requests.get(url, timeout=10)
35
+ response.raise_for_status()
36
+ except requests.exceptions.RequestException as e:
37
+ logger.error(f"Error fetching papers: {e}")
38
+ return [], [], []
39
+ root = ET.fromstring(response.text)
40
+ titles, abstracts, paper_ids = [], [], []
41
+ for entry in root.findall("{http://www.w3.org/2005/Atom}entry"):
42
+ title = entry.find("{http://www.w3.org/2005/Atom}title").text.strip()
43
+ abstract = entry.find("{http://www.w3.org/2005/Atom}summary").text.strip()
44
+ paper_id_elem = entry.find("{http://www.w3.org/2005/Atom}id")
45
+ paper_id = paper_id_elem.text.split("abs/")[-1].strip() if paper_id_elem is not None else "unknown"
46
+ titles.append(title)
47
+ abstracts.append(abstract)
48
+ paper_ids.append(paper_id)
49
+ logger.info(f"Fetched {len(abstracts)} papers.")
50
+ return titles, abstracts, paper_ids
51
+
52
+ # --- RetrievalModule Class with Reranking ---
53
+ class RetrievalModule:
54
+ def __init__(self, embedding_model="all-MiniLM-L6-v2", persist_dir="./chroma_db"):
55
+ self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
56
+ self.vector_store = None
57
+ self.persist_dir = persist_dir
58
+ self.reranker_model = AutoModelForSequenceClassification.from_pretrained("cross-encoder/ms-marco-MiniLM-L-6-v2")
59
+ self.reranker_tokenizer = AutoTokenizer.from_pretrained("cross-encoder/ms-marco-MiniLM-L-6-v2")
60
+
61
+ def build_vector_store(self, abstracts, titles, paper_ids):
62
+ if not abstracts:
63
+ logger.warning("No abstracts provided. Skipping vector store creation.")
64
+ return
65
+ metadatas = [{"title": title, "paper_id": pid} for title, pid in zip(titles, paper_ids)]
66
+ self.vector_store = Chroma.from_texts(
67
+ texts=abstracts, embedding=self.embeddings, metadatas=metadatas, persist_directory=self.persist_dir
68
+ )
69
+ self.vector_store.persist()
70
+ logger.info("Chroma vector store built.")
71
+
72
+ def rerank(self, query, retrieved):
73
+ if not retrieved:
74
+ return retrieved
75
+ inputs = [f"{query} [SEP] {doc[0]}" for doc in retrieved]
76
+ tokenized = self.reranker_tokenizer(inputs, return_tensors="pt", padding=True, truncation=True, max_length=512)
77
+ scores = self.reranker_model(**tokenized).logits.squeeze().detach().numpy()
78
+ ranked_indices = np.argsort(scores)[::-1]
79
+ return [retrieved[i] for i in ranked_indices[:3]]
80
+
81
+ def retrieve_relevant(self, query, k=5):
82
+ if not self.vector_store:
83
+ logger.warning("Vector store empty. Run `build_vector_store` first.")
84
+ return []
85
+ top_docs = self.vector_store.similarity_search(query, k=k)
86
+ retrieved = [(doc.page_content, doc.metadata) for doc in top_docs]
87
+ reranked = self.rerank(query, retrieved)
88
+ logger.info(f"Retrieved and reranked {len(reranked)} papers for query: '{query}'.")
89
+ return reranked
90
+
91
+ # --- Main Application Logic ---
92
+ data_ingestion = DataIngestion()
93
+ retrieval_module = RetrievalModule()
94
+ generator = pipeline("text-generation", model="distilgpt2")
95
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
96
+
97
+ def process_query(query):
98
+ """Retrieve and summarize the best papers with their sources."""
99
+ try:
100
+ # Check chat history for follow-up context
101
+ history = memory.load_memory_variables({})["chat_history"]
102
+ if history and "more" in query.lower():
103
+ last_output = history[-1]["content"] if history else ""
104
+ context = "\n".join([line for line in last_output.split("\n") if "Summary" in line])
105
+ else:
106
+ # Fetch and retrieve papers for new query
107
+ titles, abstracts, paper_ids = data_ingestion.fetch_papers(query)
108
+ if not abstracts:
109
+ return "No papers found after query expansion."
110
+ retrieval_module.build_vector_store(abstracts, titles, paper_ids)
111
+ retrieved = retrieval_module.retrieve_relevant(query)
112
+ if not retrieved:
113
+ return "No relevant papers retrieved."
114
+ retrieved_abstracts = [item[0] for item in retrieved]
115
+ retrieved_metadata = [item[1] for item in retrieved]
116
+ context = "\n".join(retrieved_abstracts)
117
+ memory.save_context({"input": "Retrieved papers"}, {"output": context})
118
+
119
+ # Generate a concise summary of the best papers
120
+ prompt = f"Summarize the best research papers on {query} based on these abstracts:\n{context}"
121
+ summary = generator(prompt, max_new_tokens=100, num_return_sequences=1, truncation=True)[0]["generated_text"]
122
+
123
+ # Include sources if not a follow-up
124
+ if "more" not in query.lower():
125
+ papers_ref = "\n".join([f"- {m['title']} ([link](https://export.arxiv.org/abs/{m['paper_id']}))" for m in retrieved_metadata])
126
+ full_output = f"πŸ“œ **Summary of Best Papers on {query}:**\n{summary}\n\n**Sources:**\n{papers_ref}"
127
+ else:
128
+ full_output = f"πŸ“œ **More on {query}:**\n{summary}"
129
+
130
+ memory.save_context({"input": query}, {"output": full_output})
131
+ return full_output
132
+
133
+ except Exception as e:
134
+ logger.error(f"Error: {str(e)}")
135
+ return f"Error: {str(e)}"
136
+
137
+ # --- Gradio Interface ---
138
+ demo = gr.Interface(
139
+ fn=process_query,
140
+ inputs=gr.Textbox(label="Enter your research query (e.g., 'RAG' or 'Tell me more')"),
141
+ outputs=gr.Textbox(label="Result"),
142
+ title="Conversational RAG Demo",
143
+ description="Retrieve summaries of the best papers on your topic with their sources. Ask follow-ups like 'Tell me more.'"
144
+ )
145
+
146
+ demo.launch()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/config.yaml ADDED
@@ -0,0 +1,6 @@
1
+ api_url: "http://export.arxiv.org/api/query"
2
+ embedding_model: "all-MiniLM-L6-v2"
3
+ summarizer_model: "facebook/bart-large-cnn"
4
+ max_results: 5
5
+ top_k: 2
6
+ persist_dir: "./chroma_db"
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md ADDED
@@ -0,0 +1,14 @@
1
+ ---
2
+ title: AI Research Buddy
3
+ emoji: 🔥
4
+ colorFrom: indigo
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 5.17.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ short_description: Your Research Sidekick
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt ADDED
@@ -0,0 +1,10 @@
1
+ gradio
2
+ requests
3
+ langchain
4
+ langchain-community
5
+ transformers
6
+ huggingface-hub
7
+ loguru
8
+ numpy
9
+ torch
10
+
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/research_data.db ADDED
Binary file (8.19 kB).
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/setup.py ADDED
@@ -0,0 +1,29 @@
1
+ from setuptools import setup, find_packages
2
+
3
+ setup(
4
+ name="ai_research_buddy",
5
+ version="0.1.0",
6
+ author="Nithin",
7
+ author_email="[email protected]",
8
+ description="An AI-powered research assistant using RAG, FAISS, LangChain, and Transformers.",
9
+ long_description=open("README.md").read(),
10
+ long_description_content_type="text/markdown",
11
+ url="https://github.com/Nithin8919/AI_Research_Buddy",
12
+ packages=find_packages(),
13
+ install_requires=[
14
+ "requests",
15
+ "langchain",
16
+ "faiss-cpu",
17
+ "transformers",
18
+ "torch",
19
+ "sentence-transformers",
20
+ "langchain_community",
22
+ ],
23
+ classifiers=[
24
+ "Programming Language :: Python :: 3",
25
+ "License :: OSI Approved :: MIT License",
26
+ "Operating System :: OS Independent",
27
+ ],
28
+ python_requires=">=3.7",
29
+ )
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/AI_agent.py ADDED
@@ -0,0 +1,141 @@
1
+ # AI_agent.py
2
+ from langchain_community.llms import LlamaCpp
3
+ from langchain.chains import LLMChain
4
+ from langchain.prompts import PromptTemplate
5
+ from langchain.memory import ConversationBufferMemory
6
+ from src.data_ingestion import DataIngestion
7
+ from src.RAG import RetrievalModule
8
+ from transformers import pipeline
9
+ import sqlite3
10
+ import time
11
+ from src.logger import logger
12
+
13
+ # Load LLaMA with llama.cpp—simple chatter
14
+ llm = LlamaCpp(
15
+ model_path="/Users/nitin/Downloads/llama-2-7b-chat.Q4_0.gguf", # Update this!
16
+ n_ctx=512, # Fits 8 GB
17
+ n_threads=4, # Fast on M3 Pro
18
+ temperature=0.7,
19
+ max_tokens=150,
20
+ verbose=True
21
+ )
22
+
23
+ # Instances
24
+ data_ingestion = DataIngestion()
25
+ retrieval_module = RetrievalModule()
26
+
27
+ # Memory
28
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
29
+
30
+ # Database Setup
31
+ conn = sqlite3.connect("research_data.db")
32
+ cursor = conn.cursor()
33
+ cursor.execute(
34
+ """
35
+ CREATE TABLE IF NOT EXISTS papers (
36
+ query TEXT,
37
+ retrieved_papers TEXT,
38
+ summary TEXT,
39
+ evaluation TEXT
40
+ )
41
+ """
42
+ )
43
+ conn.commit()
44
+
45
+ # Tools (just functions now)
46
+ def retrieve_relevant_papers(topic: str) -> str:
47
+ """Fetch and retrieve relevant papers."""
48
+ titles, abstracts = data_ingestion.fetch_papers(topic)
49
+ if not abstracts:
50
+ logger.warning(f"No papers retrieved for topic: {topic}")
51
+ return "Could not retrieve papers."
52
+ retrieval_module.build_vector_store(abstracts)
53
+ relevant_sections = retrieval_module.retrieve_relevant(topic)
54
+ logger.info(f"Retrieved {len(relevant_sections)} relevant papers for {topic}")
55
+ return "\n".join(relevant_sections)
56
+
57
+ def summarize_text(text: str) -> str:
58
+ """Summarize text using DistilBART."""
59
+ summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-6-6", device="mps")
60
+ text = text[:500] # Keep it short
61
+ summary = summarizer(text, max_length=150, min_length=50, do_sample=False)[0]["summary_text"]
62
+ logger.info("Generated summary for retrieved papers")
63
+ return summary
64
+
65
+ def evaluate_summary(summary: str) -> str:
66
+ """Evaluate summary quality with LLaMA."""
67
+ prompt = f"Evaluate this summary for accuracy, completeness, and clarity: {summary[:200]}"
68
+ evaluation = llm(prompt)
69
+ logger.info("Evaluated summary quality")
70
+ return evaluation
71
+
72
+ # Simple Conversational Chainβ€”no retriever needed
73
+ class ResearchAssistant:
74
+ def __init__(self):
75
+ self.prompt = PromptTemplate(
76
+ input_variables=["chat_history", "query"],
77
+ template="You are a research assistant. Based on the chat history and query, provide a helpful response.\n\nChat History: {chat_history}\nQuery: {query}\n\nResponse: "
78
+ )
79
+ self.chain = LLMChain(llm=llm, prompt=self.prompt, memory=memory)
80
+
81
+ def process_query(self, query: str) -> tuple:
82
+ """Process query with retriesβ€”no ReAct mess."""
83
+ retries = 0
84
+ max_retries = 3
85
+
86
+ while retries < max_retries:
87
+ try:
88
+ # Step 1: Retrieve papers
89
+ retrieved_papers = retrieve_relevant_papers(query)
90
+ if "Could not retrieve papers" in retrieved_papers:
91
+ query = f"more detailed {query}"
92
+ retries += 1
93
+ time.sleep(2)
94
+ continue
95
+
96
+ # Step 2: Summarize
97
+ summary = summarize_text(retrieved_papers)
98
+ if len(summary.split()) < 10:
99
+ retries += 1
100
+ time.sleep(2)
101
+ continue
102
+
103
+ # Step 3: Evaluate
104
+ evaluation = evaluate_summary(summary)
105
+
106
+ # Save to memory and DB
107
+ memory.save_context(
108
+ {"input": query},
109
+ {"output": f"Summary: {summary}\nEvaluation: {evaluation}\nAsk me anything about these findings!"}
110
+ )
111
+ cursor.execute(
112
+ "INSERT INTO papers (query, retrieved_papers, summary, evaluation) VALUES (?, ?, ?, ?)",
113
+ (query, retrieved_papers, summary, evaluation)
114
+ )
115
+ conn.commit()
116
+ return summary, evaluation
117
+
118
+ except Exception as e:
119
+ logger.error(f"Error in processing: {str(e)}")
120
+ retries += 1
121
+ time.sleep(2)
122
+
123
+ logger.error("Max retries reachedβ€”task failed.")
124
+ return "Failed after retries.", "N/A"
125
+
126
+ def chat(self, user_input: str) -> str:
127
+ """Handle follow-up chats."""
128
+ if not memory.chat_memory.messages:
129
+ return "Please start with a research query like 'large language model memory optimization'."
130
+ return self.chain.run(query=user_input)
131
+
132
+ if __name__ == "__main__":
133
+ assistant = ResearchAssistant()
134
+ query = "large language model memory optimization"
135
+ summary, evaluation = assistant.process_query(query)
136
+ print("Summary:", summary)
137
+ print("Evaluation:", evaluation)
138
+ # Test follow-up
139
+ follow_up = "Tell me more about memory optimization."
140
+ print("Follow-up:", assistant.chat(follow_up))
141
+ conn.close()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/RAG.py ADDED
@@ -0,0 +1,33 @@
1
+ from langchain.vectorstores import Chroma
2
+ from langchain.embeddings import HuggingFaceEmbeddings
3
+ from src.logger import logger
4
+
5
+ class RetrievalModule:
6
+ def __init__(self, embedding_model="all-MiniLM-L6-v2", persist_dir="./chroma_db"):
7
+ self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
8
+ self.vector_store = None
9
+ self.persist_dir = persist_dir # Persistent storage
10
+
11
+ def build_vector_store(self, texts):
12
+ """Build Chroma vector store with better logging."""
13
+ if not texts:
14
+ logger.warning("No texts provided. Skipping vector store creation.")
15
+ return
16
+
17
+ self.vector_store = Chroma.from_texts(
18
+ texts, self.embeddings, persist_directory=self.persist_dir
19
+ )
20
+ self.vector_store.persist()
21
+ logger.info("Chroma vector store successfully built.")
22
+
23
+ def retrieve_relevant(self, query, k=2):
24
+ """Fetch top-k relevant documents, logging warnings if store is empty."""
25
+ if not self.vector_store:
26
+ logger.warning("Vector store is empty. Run `build_vector_store` first.")
27
+ return []
28
+
29
+ top_docs = self.vector_store.similarity_search(query, k=k)
30
+ retrieved = [doc.page_content for doc in top_docs] if top_docs else []
31
+
32
+ logger.info(f"Retrieved {len(retrieved)} relevant papers for query: '{query}'.")
33
+ return retrieved
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/__init__.py ADDED
File without changes
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/data_ingestion.py ADDED
@@ -0,0 +1,32 @@
1
+ import requests
2
+ import xml.etree.ElementTree as ET
3
+ from src.logger import logger
4
+
5
+ class DataIngestion:
6
+ def __init__(self, api_url="http://export.arxiv.org/api/query"):
7
+ self.api_url = api_url
8
+
9
+ def fetch_papers(self, topic, max_results=5):
10
+ """Fetch papers from arXiv with logging and better error handling."""
11
+ url = f"{self.api_url}?search_query=all:{topic}&start=0&max_results={max_results}"
12
+ logger.info(f"Fetching papers from: {url}")
13
+
14
+ try:
15
+ response = requests.get(url, timeout=10) # Added timeout
16
+ response.raise_for_status()
17
+ except requests.exceptions.RequestException as e:
18
+ logger.error(f"Error fetching papers: {e}")
19
+ return [], []
20
+
21
+ # Parse XML
22
+ root = ET.fromstring(response.text)
23
+ titles, abstracts = [], []
24
+
25
+ for entry in root.findall("{http://www.w3.org/2005/Atom}entry"):
26
+ title = entry.find("{http://www.w3.org/2005/Atom}title").text.strip()
27
+ abstract = entry.find("{http://www.w3.org/2005/Atom}summary").text.strip()
28
+ titles.append(title)
29
+ abstracts.append(abstract)
30
+
31
+ logger.info(f"Fetched {len(abstracts)} papers.")
32
+ return titles, abstracts
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/logger.py ADDED
@@ -0,0 +1,13 @@
1
+ import logging
2
+ import os
3
+
4
+ LOG_FILE = "logs/research_buddy.log"
5
+ os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
6
+
7
+ logging.basicConfig(
8
+ filename=LOG_FILE,
9
+ level=logging.INFO,
10
+ format="%(asctime)s - %(levelname)s - %(message)s",
11
+ )
12
+
13
+ logger = logging.getLogger(__name__)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/src/query_enhancer.py ADDED
@@ -0,0 +1,36 @@
+ # query_enhancer.py
+ import os
+
+ from llama_cpp import Llama
+
+ class QueryEnhancer:
+     def __init__(self, model_path="TheBloke/Llama-2-7B-Chat-GGUF", model_file="llama-2-7b-chat.Q4_0.gguf"):
+         """Load LLaMA model with llama.cpp for query enhancement."""
+         try:
+             self.model = Llama(
+                 model_path=f"{model_path}/{model_file}",  # Full local path; download the GGUF file manually
+                 n_ctx=512,    # Context length; keep it small for 8 GB of RAM
+                 n_threads=4   # Use 4 CPU threads; fast on an M3 Pro
+             )
+             print("LLaMA-2-7B loaded successfully with llama.cpp.")
+         except Exception as e:
+             raise RuntimeError(f"Failed to load LLaMA-2-7B: {str(e)}")
+
+     def enhance_query(self, user_query):
+         """Refine user queries for arXiv search."""
+         prompt = (
+             f"You are a research assistant. Improve this search query for better research paper results:\n"
+             f"Original: {user_query}\n"
+             f"Refined: "
+         )
+         result = self.model(
+             prompt,
+             max_tokens=50,
+             temperature=0.7,
+             stop=["\n"]  # Stop at newline for clean output
+         )
+         refined_query = result["choices"][0]["text"].strip()
+         return refined_query
+
+ if __name__ == "__main__":
+     # Point model_path at the directory holding the manually downloaded GGUF file
+     # (the path below is an example; adjust it to your machine).
+     enhancer = QueryEnhancer(model_path=os.path.expanduser("~/models"), model_file="llama-2-7b-chat.Q4_0.gguf")
+     print("Enhanced Query:", enhancer.enhance_query("AI in healthcare"))
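
One way to fetch the weights is via huggingface_hub (already listed in the repo's requirements); a sketch, with the repo and file names taken from the defaults above:

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the quantized weights into the local HF cache and get the file path back.
gguf_path = hf_hub_download(
    repo_id="TheBloke/Llama-2-7B-Chat-GGUF",
    filename="llama-2-7b-chat.Q4_0.gguf",
)
llm = Llama(model_path=gguf_path, n_ctx=512, n_threads=4)
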
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/test.ipynb ADDED
@@ -0,0 +1,286 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import requests\n",
+     "url = \"http://export.arxiv.org/api/query?search_query=all:deep+learning&start=0&max_results=5\"\n",
+     "response = requests.get(url)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import xml.etree.ElementTree as ET\n",
+     "\n",
+     "# Parse the XML response\n",
+     "root = ET.fromstring(response.text)\n",
+     "\n",
+     "# Extract titles and abstracts\n",
+     "abstracts = [] # List to store abstracts\n",
+     "titles = [] # List to store titles\n",
+     "\n",
+     "for entry in root.findall(\"{http://www.w3.org/2005/Atom}entry\"):\n",
+     "    title = entry.find(\"{http://www.w3.org/2005/Atom}title\").text.strip()\n",
+     "    abstract = entry.find(\"{http://www.w3.org/2005/Atom}summary\").text.strip()\n",
+     "\n",
+     "    titles.append(title)\n",
+     "    abstracts.append(abstract)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/Users/nitin/Documents/AI Agent project/.venv/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+       "  from .autonotebook import tqdm as notebook_tqdm\n",
+       "Device set to use mps:0\n"
+      ]
+     }
+    ],
+    "source": [
+     "from transformers import pipeline\n",
+     "# Initialize summarization pipeline\n",
+     "summarizer = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Title: Opening the black box of deep learning\n",
+       "Summary: The great success of deep learning shows that its technology contains aprofound truth. Understanding its internal mechanism not only has important implications for the development of its technology and effective application in various fields. At present, most of the theoretical research on\n",
+       "--------------------------------------------------------------------------------\n",
+       "Title: Concept-Oriented Deep Learning\n",
+       "Summary: Concepts are the foundation of human deep learning, understanding, and knowledge integration and transfer. We propose concept-oriented deep learning(CODL) which extends (machine) deep learning with concept representations. CODL addresses some\n",
+       "--------------------------------------------------------------------------------\n",
+       "Title: Deep learning research landscape & roadmap in a nutshell: past, present\n",
+       "  and future -- Towards deep cortical learning\n",
+       "Summary: The past, present and future of deep learning is presented in this work. We predict that deep cortical learning will be the convergence of deeplearning & cortical learning which builds an artificial cortical column.\n",
+       "--------------------------------------------------------------------------------\n",
+       "Title: A First Look at Deep Learning Apps on Smartphones\n",
+       "Summary: First empirical study on 16,500 popular Android apps. Demystifies how smartphone apps exploit deep learning in the wild. Findings paint promising picture of deep learning for smartphones.\n",
+       "--------------------------------------------------------------------------------\n",
+       "Title: Geometrization of deep networks for the interpretability of deep\n",
+       "  learning systems\n",
+       "Summary: Geometrization is a bridge to connect physics, geometry, deep networkand quantum computation. This may result in a new scheme to reveal the rule of the physical world. It may also help to solve theinterpretability problem of deep\n",
+       "--------------------------------------------------------------------------------\n"
+      ]
+     }
+    ],
+    "source": [
+     "# Summarize abstracts\n",
+     "summaries = [summarizer(abs, max_length=50, min_length=25, truncation=True)[0][\"summary_text\"] for abs in abstracts]\n",
+     "\n",
+     "# Print results\n",
+     "for title, summary in zip(titles, summaries):\n",
+     "    print(f\"Title: {title}\\nSummary: {summary}\\n\" + \"-\"*80)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/var/folders/cv/01jc2kcx1455j48m7hhwcvqw0000gn/T/ipykernel_17287/3038899353.py:4: LangChainDeprecationWarning: The class `HuggingFaceEmbeddings` was deprecated in LangChain 0.2.2 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-huggingface package and should be used instead. To use it run `pip install -U :class:`~langchain-huggingface` and import as `from :class:`~langchain_huggingface import HuggingFaceEmbeddings``.\n",
+       "  embedding_model = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Top 1 Result:\n",
+       "We are in the dawn of deep learning explosion for smartphones. To bridge the\n",
+       "gap between research and practice, we present the first empirical study on\n",
+       "16,500 the most popular Android apps, demystifying how smartphone apps exploit\n",
+       "deep learning in the wild. To this end, we build a new static tool that\n",
+       "dissects apps and analyzes their deep learning functions. Our study answers\n",
+       "threefold questions: what are the early adopter apps of deep learning, what do\n",
+       "they use deep learning for, and how do their deep learning models look like.\n",
+       "Our study has strong implications for app developers, smartphone vendors, and\n",
+       "deep learning R\\&D. On one hand, our findings paint a promising picture of deep\n",
+       "learning for smartphones, showing the prosperity of mobile deep learning\n",
+       "frameworks as well as the prosperity of apps building their cores atop deep\n",
+       "learning. On the other hand, our findings urge optimizations on deep learning\n",
+       "models deployed on smartphones, the protection of these models, and validation\n",
+       "of research ideas on these models.\n",
+       "--------------------------------------------------------------------------------\n",
+       "Top 2 Result:\n",
+       "The past, present and future of deep learning is presented in this work.\n",
+       "Given this landscape & roadmap, we predict that deep cortical learning will be\n",
+       "the convergence of deep learning & cortical learning which builds an artificial\n",
+       "cortical column ultimately.\n",
+       "--------------------------------------------------------------------------------\n"
+      ]
+     }
+    ],
+    "source": [
+     "from langchain.vectorstores import FAISS\n",
+     "from langchain.embeddings import HuggingFaceEmbeddings\n",
+     "# Initialize HuggingFace Embeddings\n",
+     "embedding_model = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n",
+     "\n",
+     "# Create FAISS Vector Store\n",
+     "vector_store = FAISS.from_texts(abstracts, embedding_model)\n",
+     "\n",
+     "# Perform a similarity search (RAG Retrieval Step)\n",
+     "query = \"best deep learning advancements\"\n",
+     "top_docs = vector_store.similarity_search(query, k=2)\n",
+     "\n",
+     "# Display Retrieved Documents\n",
+     "for i, doc in enumerate(top_docs):\n",
+     "    print(f\"Top {i+1} Result:\\n{doc.page_content}\\n\" + \"-\"*80)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Device set to use mps:0\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Agent: Time to flex my research muscles!\n",
+       "Top papers: ['First empirical study on 16,500 popular Android apps. Demystifies how smartphone apps exploit deep learning in the wild. Findings', 'The past, present and future of deep learning is presented in this work. We predict that deep cortical learning will be the convergence of deep']\n",
+       "Best pick: First empirical study on 16,500 popular Android apps. Demystifies how smartphone apps exploit deep learning in the wild. Findings\n"
+      ]
+     }
+    ],
+    "source": [
+     "# Prerequisites: pip install langchain faiss-cpu transformers requests\n",
+     "import requests\n",
+     "from langchain.vectorstores import FAISS\n",
+     "from langchain.embeddings import HuggingFaceEmbeddings\n",
+     "from transformers import pipeline\n",
+     "\n",
+     "# Step 1: Define the Agent class (our Research Buddy)\n",
+     "class ResearchBuddyAgent:\n",
+     "    def __init__(self):\n",
+     "        # Load embeddings for RAG\n",
+     "        self.embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
+     "        # Load summarizer (small model for speed)\n",
+     "        self.summarizer = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\")\n",
+     "        self.papers = [] # Store fetched papers\n",
+     "\n",
+     "    # Step 2: Fetch papers (simple arXiv API call)\n",
+     "    def fetch_papers(self, topic):\n",
+     "        url = f\"http://export.arxiv.org/api/query?search_query=all:{topic}&start=0&max_results=5\"\n",
+     "        response = requests.get(url)\n",
+     "        # Parse the XML response\n",
+     "        root = ET.fromstring(response.text)\n",
+     "\n",
+     "        # Extract titles and abstracts\n",
+     "        abstracts = [] # List to store abstracts\n",
+     "        titles = [] # List to store titles\n",
+     "\n",
+     "        for entry in root.findall(\"{http://www.w3.org/2005/Atom}entry\"):\n",
+     "            title = entry.find(\"{http://www.w3.org/2005/Atom}title\").text.strip()\n",
+     "            abstract = entry.find(\"{http://www.w3.org/2005/Atom}summary\").text.strip()\n",
+     "\n",
+     "            titles.append(title)\n",
+     "            abstracts.append(abstract)\n",
+     "\n",
+     "    # Step 3: Build RAG vector store and retrieve\n",
+     "    def retrieve_relevant(self, query):\n",
+     "        # Create FAISS vector store from papers\n",
+     "        vector_store = FAISS.from_texts(abstracts, embedding_model)\n",
+     "        # Search for top matches\n",
+     "        top_docs = vector_store.similarity_search(query, k=2)\n",
+     "        return [doc.page_content for doc in top_docs] # Extract text\n",
+     "\n",
+     "    # Step 4: Summarize papers\n",
+     "    def summarize_papers(self, papers):\n",
+     "        summaries = []\n",
+     "        for paper in papers:\n",
+     "            summary = self.summarizer(paper, max_length=30, min_length=15)[0][\"summary_text\"]\n",
+     "            summaries.append(summary)\n",
+     "        return summaries\n",
+     "\n",
+     "    # Step 5: Decide the \"best\" (simple rule: shortest summary wins)\n",
+     "    def pick_best(self, summaries):\n",
+     "        best = min(summaries, key=len) # Lazy rule for demo\n",
+     "        return best\n",
+     "\n",
+     "    # Step 6: Main agent flow (plan and execute)\n",
+     "    def run(self, topic, query):\n",
+     "        print(\"Agent: Time to flex my research muscles!\")\n",
+     "        # Plan: Fetch → Retrieve → Summarize → Pick\n",
+     "        self.fetch_papers(topic)\n",
+     "        relevant_papers = self.retrieve_relevant(query)\n",
+     "        summaries = self.summarize_papers(relevant_papers)\n",
+     "        best_summary = self.pick_best(summaries)\n",
+     "        print(f\"Top papers: {summaries}\")\n",
+     "        print(f\"Best pick: {best_summary}\")\n",
+     "\n",
+     "# Run the agent\n",
+     "if __name__ == \"__main__\":\n",
+     "    agent = ResearchBuddyAgent()\n",
+     "    agent.run(topic=\"deep learning\", query=\"best deep learning advancements\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": ".venv",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.12.2"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
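
As written, the notebook's agent cell only runs because earlier cells define ET, abstracts, and embedding_model in the kernel's global scope, and its fetch_papers discards what it collects. A self-contained sketch that keeps the same flow (model names and the "shortest summary wins" rule come from the notebook; the deprecated langchain imports are kept to match the rest of the repo):

import requests
import xml.etree.ElementTree as ET
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from transformers import pipeline

ATOM = "{http://www.w3.org/2005/Atom}"

class ResearchBuddyAgent:
    def __init__(self):
        self.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
        self.abstracts = []

    def fetch_papers(self, topic, max_results=5):
        # Fetch and parse the arXiv Atom feed, keeping the abstracts on the instance.
        url = (f"http://export.arxiv.org/api/query"
               f"?search_query=all:{topic}&start=0&max_results={max_results}")
        root = ET.fromstring(requests.get(url, timeout=10).text)
        self.abstracts = [entry.find(ATOM + "summary").text.strip()
                          for entry in root.findall(ATOM + "entry")]

    def retrieve_relevant(self, query, k=2):
        # Build a throwaway FAISS index over the fetched abstracts and search it.
        if not self.abstracts:
            return []
        store = FAISS.from_texts(self.abstracts, self.embeddings)
        return [doc.page_content for doc in store.similarity_search(query, k=k)]

    def run(self, topic, query):
        self.fetch_papers(topic)
        summaries = [self.summarizer(p, max_length=30, min_length=15)[0]["summary_text"]
                     for p in self.retrieve_relevant(query)]
        print("Top papers:", summaries)
        if summaries:
            print("Best pick:", min(summaries, key=len))  # notebook's "shortest wins" rule

if __name__ == "__main__":
    ResearchBuddyAgent().run("deep+learning", "best deep learning advancements")
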
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt CHANGED
@@ -6,5 +6,6 @@ transformers
  huggingface-hub
  loguru
  numpy
+ sentence-transformers
  torch
-
+ chromadb