jb-ALPHA10X committed on
Commit
5208086
·
1 Parent(s): 294ea9c

update tools

Browse files
Files changed (2) hide show
  1. .gitignore +171 -0
  2. app.py +35 -19
.gitignore ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # PyPI configuration file
171
+ .pypirc
app.py CHANGED
@@ -7,10 +7,13 @@ from tools.final_answer import FinalAnswerTool
7
 
8
  from Gradio_UI import GradioUI
9
 
 
 
 
10
 
11
  @tool
12
  def lookup_wikipedia_page(search_query: str) -> str:
13
- """Looks up the exact Wikipedia page title for a given search query
14
  Args:
15
  search_query: The search term to find the Wikipedia page for
16
  """
@@ -43,36 +46,49 @@ def lookup_wikipedia_page(search_query: str) -> str:
43
 
44
  @tool
45
  def get_wikipedia_views(article_title: str, window_size_in_days: int) -> str:
46
- """Fetches view statistics for a Wikipedia article
47
  Args:
48
  article_title: The title of the Wikipedia article to get views for
49
  window_size_in_days: The number of days to get views for
50
  """
51
  try:
52
- # Format the dates (get views for last 7 days)
53
- end_date = datetime.datetime.now()
54
- start_date = end_date - datetime.timedelta(days=window_size_in_days)
55
-
56
- # Format dates as YYYYMMDD for the API
57
- start_str = start_date.strftime('%Y%m%d')
58
- end_str = end_date.strftime('%Y%m%d')
59
 
60
  # Clean article title for URL
61
- article = article_title.replace(' ', '_')
62
 
63
  # Construct API URL
64
- url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{article}/daily/{start_str}/{end_str}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- # Make request
67
- response = requests.get(url)
 
 
 
 
68
 
69
- if response.status_code == 200:
70
- data = response.json()
71
- total_views = sum(item['views'] for item in data['items'])
72
- return f"The article '{article_title}' had {total_views} views in the last 7 days"
73
- else:
74
- return f"Error fetching data: {response.status_code}"
75
 
 
 
 
 
 
76
  except Exception as e:
77
  return f"Error processing request: {str(e)}"
78
 
 
7
 
8
  from Gradio_UI import GradioUI
9
 
10
+ from huggingface_hub import login
11
+
12
+ login()
13
 
14
  @tool
15
  def lookup_wikipedia_page(search_query: str) -> str:
16
+ """Looks up the exact Wikipedia page title for a given search query, allowing to get the exact page title for subsequent wikipedia views tool
17
  Args:
18
  search_query: The search term to find the Wikipedia page for
19
  """
 
46
 
47
  @tool
48
  def get_wikipedia_views(article_title: str, window_size_in_days: int) -> str:
49
+ """Fetches view statistics for a Wikipedia article. The article title is the exact page title from the lookup_wikipedia_page tool.
50
  Args:
51
  article_title: The title of the Wikipedia article to get views for
52
  window_size_in_days: The number of days to get views for
53
  """
54
  try:
 
 
 
 
 
 
 
55
 
56
  # Clean article title for URL
57
+ article_title = article_title.replace(' ', '_')
58
 
59
  # Construct API URL
60
+ # First get the page ID using the API
61
+ id_url = "https://en.wikipedia.org/w/api.php"
62
+ id_params = {
63
+ "action": "query",
64
+ "format": "json",
65
+ "titles": article_title,
66
+ "prop": "pageviews",
67
+ "pvipdays": window_size_in_days
68
+ }
69
+
70
+ # Get the response data
71
+ id_response = requests.get(id_url, params=id_params)
72
+ if id_response.status_code != 200:
73
+ return f"Error getting page ID: {id_response.status_code}"
74
+
75
+ id_data = id_response.json()
76
 
77
+ # Check if pages data exists
78
+ if "pages" not in id_data["query"]:
79
+ return f"Could not find page '{article_title}'"
80
+
81
+ # Get the first page's data
82
+ page_data = next(iter(id_data["query"]["pages"].values()))
83
 
84
+ if "pageviews" not in page_data:
85
+ return f"No pageview data available for '{article_title}'"
 
 
 
 
86
 
87
+ # Calculate total views
88
+ total_views = sum(views for views in page_data["pageviews"].values() if views is not None)
89
+
90
+ return f"The article '{article_title}' had {total_views} views in the last {window_size_in_days} days"
91
+
92
  except Exception as e:
93
  return f"Error processing request: {str(e)}"
94