abtsousa committed
Commit a40ea82 · 1 Parent(s): 603a029

Refactor Wikipedia tool to simplify search functionality and remove unused parameters

Files changed (1)
  tools/wikipedia.py  +6 -135
tools/wikipedia.py CHANGED
@@ -3,141 +3,12 @@ from langchain_community.document_loaders import WikipediaLoader
 from typing import Optional
 
 @tool
-def fetch_wikipedia_content(query: str, top_k: int = 2, max_chars_per_doc: int = 16000, specific_section: Optional[str] = None) -> dict[str, str]:
-    """Wikipedia search with optional section-based content management.
-
-    Args:
-        query: The search query.
-        top_k (Optional): The number of top results to return (default: 2).
-        max_chars_per_doc (Optional): Maximum characters per document (default: 16000).
-        specific_section (Optional): If provided, return only this specific section (use section name from sections list).
-    """
-    # Load documents with full content to analyze sections
-    search_docs = WikipediaLoader(
-        query=query,
-        load_max_docs=top_k,
-        doc_content_chars_max=128000 # Load full content to analyze sections
-    ).load()
-
-    formatted_docs = []
-    for doc in search_docs:
-        content = doc.page_content
-        source = doc.metadata.get("source", "")
-        page = doc.metadata.get("page", "")
-
-        if specific_section:
-            # Return only the requested section
-            section_content = _extract_specific_section(content, specific_section)
-            if section_content:
-                formatted_doc = f'<Document source="{source}" page="{page}" section="{specific_section}"/>\n{section_content}\n</Document>'
-            else:
-                formatted_doc = f'<Document source="{source}" page="{page}"/>\nSection "{specific_section}" not found. Available sections: {_get_section_names(content)}\n</Document>'
-        else:
-            # Process with section-based distribution
-            processed_content, sections_info = _process_content_by_sections(content, max_chars_per_doc)
-            formatted_doc = f'<Document source="{source}" page="{page}"/>\n{sections_info}\n\n{processed_content}\n</Document>'
-
-        formatted_docs.append(formatted_doc)
-
-    return {"wiki_results": "\n\n---\n\n".join(formatted_docs)}
-
-def _get_sections(content: str) -> list[tuple[str, str]]:
-    """Parse Wikipedia content into top-level sections only.
-
-    Returns:
-        List of (section_name, section_content) tuples
+def wiki_search(query: str) -> str:
     """
-    sections = []
-    lines = content.split('\n')
-    current_section = "Introduction"
-    current_content = []
-
-    for line in lines:
-        # Check if this is a TOP-LEVEL section header (== but not ===)
-        if line.strip().startswith('==') and not line.strip().startswith('==='):
-            # Save previous section
-            if current_content:
-                sections.append((current_section, '\n'.join(current_content)))
-
-            # Start new section
-            current_section = line.strip().replace('=', '').strip()
-            current_content = []
-        else:
-            current_content.append(line)
-
-    # Add the last section
-    if current_content:
-        sections.append((current_section, '\n'.join(current_content)))
-
-    return sections
-
-def _get_section_names(content: str) -> str:
-    """Get a comma-separated list of section names."""
-    sections = _get_sections(content)
-    return ", ".join([section[0] for section in sections])
+    Search Wikipedia for a given query and return the summary.
 
-def _extract_specific_section(content: str, section_name: str) -> Optional[str]:
-    """Extract a specific section from Wikipedia content."""
-    sections = _get_sections(content)
-
-    # Look for exact match first
-    for name, content_part in sections:
-        if name.lower() == section_name.lower():
-            return content_part
-
-    # Look for partial match
-    for name, content_part in sections:
-        if section_name.lower() in name.lower():
-            return content_part
-
-    return None
-
-def _process_content_by_sections(content: str, max_chars: int) -> tuple[str, str]:
-    """Process content by distributing characters equally across sections.
-
-    Returns:
-        Tuple of (processed_content, sections_info)
+    Args:
+        query (str): The search query to find relevant Wikipedia articles.
     """
-    sections = _get_sections(content)
-
-    # If content is under limit, return as is
-    total_chars = len(content)
-    if total_chars <= max_chars:
-        sections_info = f"Available sections: {_get_section_names(content)}"
-        return content, sections_info
-
-    # Calculate chars per section
-    num_sections = len(sections)
-    if num_sections == 0:
-        return content[:max_chars] + "...", "No sections found, content truncated"
-
-    chars_per_section = max_chars // num_sections
-    reserve_chars = max_chars % num_sections # Extra chars to distribute
-
-    processed_sections = []
-
-    for i, (section_name, section_content) in enumerate(sections):
-        # Give some sections a few extra characters from the reserve
-        section_limit = chars_per_section + (1 if i < reserve_chars else 0)
-
-        if len(section_content) > section_limit:
-            truncated_content = section_content[:section_limit-3] + "..."
-        else:
-            truncated_content = section_content
-
-        processed_sections.append(f"== {section_name} ==\n{truncated_content}")
-
-    sections_info = f"Content truncated due to {max_chars} char limit. Available sections: {_get_section_names(content)}. Use specific_section parameter to load any section in full."
-
-    return "\n\n".join(processed_sections), sections_info
-
-if __name__ == "__main__":
-    # Example usage
-    query = "Albert Einstein"
-    result = fetch_wikipedia_content.invoke(query, top_k=1, max_chars_per_doc=16000)
-    print(result['wiki_results'])
-
-    # Very small page example
-    query = "Nico Ditch"
-    result = fetch_wikipedia_content.invoke(query, top_k=1, max_chars_per_doc=16000)
-    print(result['wiki_results'])
+    loader = WikipediaLoader(query=query, load_max_docs=2).load()
+    return "\n---\n".join([f'<Document source="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>' for doc in loader])