Spaces:
Running
Running
Initial commit
Browse files- Dockerfile +59 -0
- aclanthology.py +203 -0
- main.py +269 -0
- metrics.py +27 -0
- pdf.py +14 -0
- plots.py +162 -0
- requirements.txt +10 -0
- s2.py +499 -0
Dockerfile
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Starting from the Grobid image
# (Grobid provides the PDF-parsing service that scipdf_parser talks to.)
FROM lfoppiano/grobid:0.7.3

# Setting the user to root for installation purposes
USER root

# Create necessary directories for Grobid
RUN mkdir -m 777 -p /opt/grobid/grobid-home/tmp

# Give permissions to the default supervisord log directory and Gradio logs
RUN mkdir -p /var/log/supervisor && chmod -R 777 /var/log/supervisor
RUN mkdir -p /var/run/supervisor && chmod 777 /var/run/supervisor
RUN mkdir -p /var/log/gradio && chmod 777 /var/log/gradio

# Install supervisord and python (for gradio)
RUN apt-get update && apt-get install -y supervisor python3 python3-pip git && rm -rf /var/lib/apt/lists/*
RUN pip3 install gradio
RUN pip3 install git+https://github.com/titipata/scipdf_parser
RUN pip3 install git+https://github.com/coderanger/supervisor-stdout

# Copy your gradio app to the image
# NOTE(review): "COPY . /app/" already includes ./data unless it is
# excluded by a .dockerignore — confirm whether the second COPY is needed.
COPY . /app/
COPY ./data /app/data

# Install gradio (and the rest of the app's pinned requirements)
RUN pip3 install -r /app/requirements.txt

# Download spacy en_core_web_sm
RUN python3 -m spacy download en_core_web_sm

# Supervisord configuration
# Runs two processes side by side in the container: the Grobid service and
# the Gradio app, with the event listener forwarding their logs to stdout.
RUN echo "[supervisord]" > /etc/supervisor/conf.d/supervisord.conf && \
    echo "nodaemon=true" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "[rpcinterface:supervisor]" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "[unix_http_server]" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "file=/tmp/supervisor.sock" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "[program:grobid]" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "command=/opt/grobid/grobid-service/bin/grobid-service" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "[program:gradio]" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "command=python3 /app/main.py" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "stdout_logfile=/dev/fd/1" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "stdout_logfile_maxbytes=0" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "redirect_stderr=true" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "stdout_events_enabled=true" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "stderr_events_enabled=true" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "[eventlistener:stdout]" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "command = supervisor_stdout" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "buffer_size = 100" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "events = PROCESS_LOG" >> /etc/supervisor/conf.d/supervisord.conf && \
    echo "result_handler = supervisor_stdout:event_handler" >> /etc/supervisor/conf.d/supervisord.conf


# Start processes with supervisord
CMD ["/usr/bin/supervisord"]
|
aclanthology.py
ADDED
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
import asyncio
|
5 |
+
import json
|
6 |
+
|
7 |
+
import aiohttp
|
8 |
+
import requests
|
9 |
+
from bs4 import BeautifulSoup
|
10 |
+
|
11 |
+
|
12 |
+
async def fetch(session, url):
    """Asynchronous function to fetch a URL using aiohttp.

    Args:
        session (aiohttp.ClientSession): Open session to issue the GET with.
        url (str): URL to fetch.

    Returns:
        str: The response body decoded as text.
    """
    # The context manager releases the response back to the pool when done.
    async with session.get(url) as response:
        return await response.text()
|
16 |
+
|
17 |
+
|
18 |
+
async def async_match_acl_id_to_s2_paper(acl_id):
    """Look up a paper on the Semantic Scholar Graph API by its ACL ID.

    Args:
        acl_id (str): The ACL Anthology ID of the paper (e.g. "2023.acl-long.1").

    Returns:
        dict: The JSON-decoded Semantic Scholar paper record.
    """
    endpoint = f"https://api.semanticscholar.org/graph/v1/paper/ACL:{acl_id}"
    async with aiohttp.ClientSession() as session:
        body = await fetch(session, endpoint)
    return json.loads(body)
|
32 |
+
|
33 |
+
|
34 |
+
def extract_paper_info(paper_url):
    """Scrape title, authors, and ACL ID from an ACL Anthology paper page.

    Args:
        paper_url (str): The URL of the paper on the ACL Anthology website.

    Returns:
        dict: Keys "title" (str), "authors" (list[str]), and "acl_id" (str).
    """
    page = requests.get(paper_url, timeout=10).text
    parsed = BeautifulSoup(page, "html.parser")

    # The paper title lives in the <h2 id="title"> heading.
    title = parsed.find("h2", id="title").text.strip()
    # Author links are the anchors inside the <p class="lead"> paragraph.
    authors = [
        anchor.text
        for anchor in parsed.find_all("a")
        if anchor.parent.name == "p" and anchor.parent["class"] == ["lead"]
    ]
    # Paper URLs end in ".../<acl_id>/", so the ID is the penultimate segment.
    acl_id = paper_url.split("/")[-2]

    return {"title": title, "authors": authors, "acl_id": acl_id}
|
56 |
+
|
57 |
+
|
58 |
+
def extract_author_info(author_url):
    """Scrape an ACL Anthology author page for the author's name and papers.

    Args:
        author_url (str): The URL of the author's page on ACL Anthology.

    Returns:
        dict: {"author": str, "papers": list of {"title": str, "url": str}}.
    """
    page = requests.get(author_url, timeout=10).text
    parsed = BeautifulSoup(page, "html.parser")

    author_name = parsed.find("h2", id="title").text.strip()

    papers = []
    # Each paper is rendered as a <p>; keep only the anchor that carries the
    # paper title (i.e. not the "pdf"/"bib"/"abs" utility links).
    for paragraph in parsed.find_all("p"):
        anchors = [
            a
            for a in paragraph.find_all("a")
            if a.text.strip() not in ["pdf", "bib", "abs"]
        ]
        if anchors:
            papers.append(
                {
                    "title": anchors[0].text.strip(),
                    "url": "https://aclanthology.org" + anchors[0]["href"],
                }
            )

    return {"author": author_name, "papers": papers}
|
88 |
+
|
89 |
+
|
90 |
+
def extract_venue_info(venue_url):
    """Scrape an ACL Anthology venue page for its name and listed papers.

    Args:
        venue_url (str): The URL of the venue to extract information from.

    Returns:
        dict: {"venue": str, "papers": list of {"title": str, "url": str}}.
    """
    page = requests.get(venue_url, timeout=10).text
    parsed = BeautifulSoup(page, "html.parser")

    venue_name = parsed.find("h2", id="title").text.strip()

    papers = []
    # Each paper is rendered as a <p>; skip the "pdf"/"bib"/"abs" utility
    # links and keep the first remaining anchor, which is the paper title.
    for paragraph in parsed.find_all("p"):
        anchors = [
            a
            for a in paragraph.find_all("a")
            if a.text.strip() not in ["pdf", "bib", "abs"]
        ]
        if anchors:
            papers.append(
                {
                    "title": anchors[0].text.strip(),
                    "url": "https://aclanthology.org" + anchors[0]["href"],
                }
            )

    return {"venue": venue_name, "papers": papers}
|
119 |
+
|
120 |
+
|
121 |
+
def determine_page_type(url):
    """
    Determine the type of ACL Anthology page given its URL.

    Tries cheap string heuristics first and only falls back to fetching the
    page over HTTP when the URL shape alone is inconclusive.

    Args:
        url (str): The URL to be checked.

    Returns:
        str: "paper", "author", or "venue". Returns None if the type can't be determined.
    """
    # Extract last segments from the URL (dropping empties from "//" and a
    # trailing slash).
    segments = [segment for segment in url.split("/") if segment]

    # Check if the URL points to an event (venue)
    if "events" in url or "volumes" in url:
        return "venue"

    # If URL ends in a pattern like "2023.acl-long.1" it's a paper
    # NOTE(review): for a typical paper URL such as
    # https://aclanthology.org/2023.acl-long.1/ the penultimate segment is
    # the domain (not numeric), so this heuristic may rarely fire and paper
    # URLs fall through to the HTML check below — confirm intent.
    if len(segments) > 1 and segments[-2].isnumeric() and "." in segments[-1]:
        return "paper"

    if "people" in url:
        return "author"

    # If none of the above rules apply, fetch the page and check its content
    try:
        html_doc = requests.get(url, timeout=10).text
        soup = BeautifulSoup(html_doc, "html.parser")

        # Check for unique elements specific to each page type:
        # author pages link to Google Scholar, paper pages do not.
        if soup.find("h2", id="title"):
            return (
                "author"
                if soup.find("a", href=True, text="Google Scholar")
                else "paper"
            )
        elif soup.find("h1", text="Anthology Volume"):
            return "venue"
    except Exception as e:
        # Network/parse failure: report and fall through to None ("unknown").
        print(f"Error determining page type: {e}")

    return None
|
163 |
+
|
164 |
+
|
165 |
+
if __name__ == "__main__":
    # asyncio.get_event_loop() without a running loop is deprecated since
    # Python 3.10; create a dedicated loop for this demo script instead.
    loop = asyncio.new_event_loop()

    urls = [
        "https://aclanthology.org/2023.acl-long.1/",
        "https://aclanthology.org/people/a/anna-rogers/",
        "https://aclanthology.org/events/acl-2022/",
    ]

    for url in urls:
        # Classify each URL once — determine_page_type may issue an HTTP
        # request, so the previous per-branch calls were wasteful.
        page_type = determine_page_type(url)

        if page_type == "paper":
            print(f"Paper: {url}")
            res = extract_paper_info(url)
            paper = loop.run_until_complete(
                async_match_acl_id_to_s2_paper(res["acl_id"])
            )
            print(paper)

        elif page_type == "author":
            print(f"Author: {url}")
            res = extract_author_info(url)
            tasks = [
                async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
                for paper in res["papers"]
            ]
            s2_papers = loop.run_until_complete(asyncio.gather(*tasks))
            # Bug fix: the crawled paper dicts only carry "title"/"url";
            # the Semantic Scholar response is what holds "paperId".
            for s2_paper in s2_papers:
                print(s2_paper["paperId"])

        elif page_type == "venue":
            print(f"Venue: {url}")
            res = extract_venue_info(url)
            tasks = [
                async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
                for paper in res["papers"]
            ]
            s2_papers = loop.run_until_complete(asyncio.gather(*tasks))
            for s2_paper in s2_papers:
                print(s2_paper["paperId"])

    loop.close()
|
main.py
ADDED
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
|
2 |
+
# All rights reserved.
|
3 |
+
# Thanks to Mukund Rungta for inspiration on early versions of this demo https://huggingface.co/spaces/mrungta8/CitationalAmnesia
|
4 |
+
|
5 |
+
|
6 |
+
import asyncio
|
7 |
+
|
8 |
+
import gradio as gr
|
9 |
+
|
10 |
+
from aclanthology import determine_page_type
|
11 |
+
from plots import generate_cfdi_plot, generate_maoc_plot
|
12 |
+
from s2 import (check_s2_id_type, compute_stats_for_acl_author,
|
13 |
+
compute_stats_for_acl_paper, compute_stats_for_acl_venue,
|
14 |
+
compute_stats_for_pdf, compute_stats_for_s2_author,
|
15 |
+
compute_stats_for_s2_paper)
|
16 |
+
|
17 |
+
|
18 |
+
def return_clear():
    """Reset all demo components.

    Returns:
        tuple: Eight ``None`` values, one per Gradio component wired to the
        Clear button (five outputs plus the three input widgets).
    """
    return (None,) * 8
|
25 |
+
|
26 |
+
|
27 |
+
def create_compute_stats(submit_type=None):
    """Build a compute callback bound to one of the three input tabs.

    Args:
        submit_type (str | None): Which input the returned callback consumes:
            "s2_id", "acl_link", or "pdf_file".

    Returns:
        callable: ``compute_stats(s2_id, pdf_file, acl_link)`` suitable as a
        Gradio event handler. It returns the five output values
        (title/authors, number of references, top-fields text, CFDI, CFDI plot).
    """

    def compute_stats(s2_id=None, pdf_file=None, acl_link=None):
        if submit_type == "s2_id" and s2_id:
            # Check if s2_id is a paper id or an author id
            id_type, author_name = check_s2_id_type(s2_id)
            if id_type == "paper":
                results = compute_stats_for_s2_paper(s2_id)
                results = results + ("paper",)
                return plot_and_return_stats(*results)
            if id_type == "author":
                results = compute_stats_for_s2_author(s2_id, author_name)
                results = results + ("author",)
                return plot_and_return_stats(*results)
        if submit_type == "acl_link" and acl_link:
            # Crawl all papers for the author or venue, or just the paper if
            # it is a paper link.
            url_type = determine_page_type(acl_link)
            if url_type == "paper":
                results = compute_stats_for_acl_paper(acl_link)
                results = results + ("paper",)
                return plot_and_return_stats(*results)
            if url_type == "author":
                results = compute_stats_for_acl_author(acl_link)
                results = results + ("author",)
                return plot_and_return_stats(*results)
            if url_type == "venue":
                results = compute_stats_for_acl_venue(acl_link)
                results = results + ("proceedings",)
                return plot_and_return_stats(*results)
        if submit_type == "pdf_file" and pdf_file:
            # Compute the citation field diversity index for the uploaded PDF.
            results = asyncio.run(compute_stats_for_pdf(pdf_file))
            results = results + ("paper",)
            return plot_and_return_stats(*results)
        # Bug fix: this fallback used to return eight Nones, but the submit
        # handlers are wired to five output components (title, num_ref,
        # top_field_list, cfdi, cfdi_plot) — return one value per output.
        return None, None, None, None, None

    return compute_stats
|
63 |
+
|
64 |
+
|
65 |
+
def plot_and_return_stats(
    title_authors,
    num_references,
    field_counts,
    year_title_dict,
    cfdi,
    cadi,
    maoc,
    compute_type,
):
    """Render the CFDI plot and assemble the demo's output values.

    Args:
        title_authors (str): Title and authors (or author/venue name).
        num_references (int): Number of references found.
        field_counts (dict): Citation count per research field.
        year_title_dict (dict): Year -> title of cited papers (currently unused).
        cfdi: Citation Field Diversity Index value.
        cadi: Citation Age Diversity Index value (currently unused).
        maoc: Mean age of citation data (currently unused).
        compute_type (str): "paper", "author", or "proceedings" — used to
            label the highlighted point in the plot.

    Returns:
        tuple: (title_authors, num_references, top-3-fields text, cfdi,
        CFDI matplotlib figure).
    """
    # Figure highlighting where this paper/author sits in the corpus-wide
    # CFDI distribution.
    fig_cfdi = generate_cfdi_plot(cfdi, compute_type)

    # Three most-cited fields, one "field: count" entry per line,
    # sorted by descending citation count.
    ranked_fields = sorted(
        field_counts.items(), reverse=True, key=lambda item: item[1]
    )
    top_fields_text = "\n".join(
        f"{field}: {count}" for field, count in ranked_fields[:3]
    )

    return (
        title_authors,
        num_references,
        top_fields_text,
        cfdi,
        fig_cfdi,
    )
|
123 |
+
|
124 |
+
|
125 |
+
# Gradio UI: three input tabs (Semantic Scholar ID, ACL Anthology link,
# PDF upload) all feed the same compute pipeline, whose results fill the
# shared output components below.
with gr.Blocks(
    theme=gr.themes.Soft()
) as demo:
    # Introductory markdown: what the demo does and how CFDI is defined.
    with gr.Row():
        gr.Markdown(
            """
# Citation Field Diversity Calculator

Welcome to this interactive demo to analyze the field diversity aspect of your citational practice. This tool will enable you to reflect on a critical aspect:

- By whom am I influenced? Which fields heavily inform and shape the research trajectory of my works?

In addition, you will be able to analyze how the above compares to the average paper or author. The results you will receive cannot be categorized into “good” or “bad”. Instead, they are meant to raise self-awareness about one’s citational diversity and reflect on it. The results might bring you to further questions, such as:

- Am I reading widely across fields?
- Should I expand my literature search to include works from other fields?

Using citations as a tangible marker of influence, our demo provides empirical insights into the influence of papers across fields.

## What is Citation Field Diversity?

Field diversity is a measure of the variety of research fields that a paper or an author draws upon. A high field diversity indicates that the work draws from various distinct research fields, demonstrating a multidisciplinary influence on that work or author.

## What is the Citation Field Diversity Index (CFDI) and how is it calculated?

The calculation of Field Diversity involves extracting all the references of a paper, categorizing them into distinct study fields, and determining the proportion of each study field over all the references. The Citation Field Diversity Index (CFDI) is then computed by applying the Gini Index on these proportions.

For more details, please refer to Eq. 3 in [this paper](https://aclanthology.org/2023.acl-long.341/).
"""
        )

    # Usage instructions for the three input modes.
    gr.Markdown(
        """
## How do I Interpret CFDI?

Higher values of CFDI indicate a greater diversity of a paper in terms of the fields it cites, signifying a multidisciplinary influence. On the other hand, lower values signify a lower diversity, indicating that citations are more concentrated in specific fields.

## How can I use this demo?

There are three ways for you to compute the field diversity for papers:
1. **Semantic Scholar ID**: Enter the Semantic Scholar ID of a **paper** or **author** and click the *"Compute"* button.
2. **ACL Anthology Link**: Paste the ACL Anthology link of a **paper**, **venue**, or **author** and click the *"Compute"* button.
3. **PDF File**: Upload your **paper** PDF and click the *"Compute"* button.

To retrieve the **Semantic Scholar ID** for a paper such as "The Elephant in the Room: Analyzing the Presence of Big Tech in Natural Language Processing Research," search the paper on Semantic Scholar [here](https://www.semanticscholar.org/paper/The-Elephant-in-the-Room%3A-Analyzing-the-Presence-of-Abdalla-Wahle/587ffdfd7229e8e0dbc5250b44df5fad6251f6ad) and use the last part of the URL. The Semantic Scholar ID (SSID) for this paper is: **587ffdfd7229e8e0dbc5250b44df5fad6251f6ad**.

To get an ACL Anthology link, you can go to any ACL Anthology paper, author or proceedings page and just copy and paste the url. For example:
- https://aclanthology.org/2023.acl-long.1/
- https://aclanthology.org/people/a/anna-rogers/
- https://aclanthology.org/events/acl-2002/
"""
    )

    # Input widgets: one tab per input mode, each with its own Compute button.
    with gr.Row():
        with gr.Tabs():
            with gr.TabItem("Semantic Scholar ID"):
                s2_id = gr.Textbox(
                    label="Semantic Scholar ID",
                    placeholder=(
                        "Enter the Semantic Scholar ID here and press enter..."
                    ),
                    # value="587ffdfd7229e8e0dbc5250b44df5fad6251f6ad",
                )
                with gr.Row():
                    s2_submit_btn = gr.Button("Compute")
            with gr.TabItem("ACL Anthology Link"):
                acl_link = gr.Textbox(
                    label="ACL Anthology Link",
                    placeholder="Paste the ACL Anthology link here...",
                )
                with gr.Row():
                    acl_submit_btn = gr.Button("Compute")
            with gr.TabItem("PDF File"):
                pdf_file = gr.File(
                    file_types=[".pdf"], label="Upload your paper PDF"
                )
                with gr.Row():
                    file_submit_btn = gr.Button("Compute")

    # Output widgets (commented-out components are for the not-yet-enabled
    # citation-age analysis).
    with gr.Row():
        title = gr.Textbox(
            label="Title / Author Name / Venue Name:", lines=2
        )  # Can be either paper title, author name, or proceedings title
    with gr.Row():
        num_ref = gr.Textbox(label="Number of references", lines=3)
        top_field_list = gr.Textbox(label="Top 3 fields cited:", lines=3)
        # top_age_list = gr.Textbox(label="Top 3 oldest papers cited:", lines=3)
    with gr.Row():
        cfdi = gr.Textbox(label="CFDI")
        # cadi = gr.Textbox(label="CADI")
    with gr.Row():
        cfdi_plot = gr.Plot(label="Citation Field Diversity")
        # cadi_plot = gr.Plot(label="Citation Age Diversity")
    with gr.Row():
        clear_btn = gr.Button("Clear")

    # Shared event kwargs: all three submit paths use the same inputs and
    # outputs — only the handler function ("fn") differs per tab.
    submit_args = dict(
        inputs=[s2_id, pdf_file, acl_link],
        outputs=[
            title,
            num_ref,
            top_field_list,
            # top_age_list,
            cfdi,
            # cadi,
            cfdi_plot,
            # cadi_plot,
        ],
    )

    s2_submit_args = submit_args.copy()
    s2_submit_args["fn"] = create_compute_stats(submit_type="s2_id")

    acl_submit_args = submit_args.copy()
    acl_submit_args["fn"] = create_compute_stats(submit_type="acl_link")

    file_submit_args = submit_args.copy()
    file_submit_args["fn"] = create_compute_stats(submit_type="pdf_file")

    # Pressing enter in a textbox triggers the same handler as its button.
    s2_id.submit(**s2_submit_args)
    acl_link.submit(**acl_submit_args)

    acl_submit_btn.click(**acl_submit_args)
    s2_submit_btn.click(**s2_submit_args)
    file_submit_btn.click(**file_submit_args)

    # Clear resets outputs AND the three input widgets (eight components,
    # matching the eight Nones returned by return_clear).
    clear_btn.click(
        fn=return_clear,
        inputs=[],
        outputs=[
            title,
            num_ref,
            top_field_list,
            # top_age_list,
            cfdi,
            # cadi,
            cfdi_plot,
            # cadi_plot,
            s2_id,
            acl_link,
            pdf_file,
        ],
    )

# Queue limits concurrent jobs; the server binds on all interfaces so the
# app is reachable from outside the container.
demo.queue(concurrency_count=3)
demo.launch(server_port=7860, server_name="0.0.0.0")
|
metrics.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
|
7 |
+
def calculate_gini_simpson(dictionary):
    """Calculate the Gini-Simpson diversity index of category counts.

    Args:
        dictionary (dict): Mapping of category -> count.

    Returns:
        float | None: 1 - sum of squared proportions (0 = no diversity,
        approaching 1 = maximal diversity), or None when the counts sum to
        zero (empty/all-zero input), consistent with calculate_gini.
    """
    total = sum(dictionary.values())
    # Guard the division below: an empty dict (or all-zero counts) has no
    # well-defined diversity.
    if total == 0:
        return None
    sum_squares = sum((n / total) ** 2 for n in dictionary.values())
    return 1 - sum_squares
|
14 |
+
|
15 |
+
|
16 |
+
def calculate_gini(frequencies):
    """Calculate the Gini diversity index of a frequency sequence.

    Computes the mean absolute difference over all unique pairs, normalized
    by n^2 times the mean (the pairwise-sum form of the Gini coefficient).

    Args:
        frequencies: Sequence of non-negative numbers.

    Returns:
        float | None: Gini index in [0, 1), or None for empty input or a
        zero mean (undefined).
    """
    values = np.array(frequencies)
    if len(values) == 0 or np.mean(values) == 0:
        return None

    # Sum |x_i - x_j| over every unique pair (i < j).
    pairwise_sum = 0.0
    for idx in range(len(values) - 1):
        pairwise_sum += np.sum(np.abs(values[idx] - values[idx + 1:]))

    return pairwise_sum / (len(values) ** 2 * np.mean(values))
|
pdf.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import scipdf
|
2 |
+
|
3 |
+
|
4 |
+
def parse_pdf_to_artcile_dict(pdf_path):
    """Parse a PDF into an article dict via scipdf (GROBID-backed).

    NOTE(review): "artcile" is a typo for "article"; kept as-is since
    callers elsewhere may depend on this name.

    Args:
        pdf_path (str): Path to the PDF file to parse.

    Returns:
        dict: Parsed article structure as returned by scipdf.parse_pdf_to_dict.
    """
    return scipdf.parse_pdf_to_dict(pdf_path)
|
6 |
+
|
7 |
+
|
8 |
+
if __name__ == "__main__":
    # Ad-hoc smoke test against a developer-local PDF; this path only exists
    # on the original author's machine — replace it to run locally. Requires
    # a running GROBID service for scipdf to talk to.
    article_dict = scipdf.parse_pdf_to_dict(
        "/Users/jp/Documents/papers/demo-test/EMNLP23_Influence_NLP_Citation_Analysis.pdf"
    )  # return dictionary
    print(article_dict.keys())
    print(article_dict["title"])
    print(article_dict["references"][0].keys())
|
plots.py
ADDED
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
import os
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import pandas as pd
|
8 |
+
import seaborn as sns
|
9 |
+
from matplotlib import pyplot as plt
|
10 |
+
from scipy.stats import gaussian_kde
|
11 |
+
|
12 |
+
# Resolve data files relative to this module, not the working directory.
dirname = os.path.dirname(__file__)

# Load the csv file into a pandas DataFrame: per-paper field-diversity
# scores for the reference corpus, used as the background distribution in
# generate_cfdi_plot.
papers_df = pd.read_csv(
    os.path.join(dirname, "data/nlp_papers_field_diversity.csv")
)

# Compute the mean CFDI
mean_cfdi = papers_df["incoming_diversity"].mean()

# Compute the mean CADI: one mean-citation-age value per corpus paper,
# filled from the text file below and used by generate_maoc_plot.
mean_citation_ages = []

# Open the file and read the content in a list
with open(
    os.path.join(dirname, "data/nlp_papers_citation_age.txt"),
    "r",
    encoding="utf-8",
) as filehandle:
    for line in filehandle:
        # Each line holds one float; slicing off the last character strips
        # the trailing newline before parsing.
        temp = float(line[:-1])
        mean_citation_ages.append(temp)
|
34 |
+
|
35 |
+
|
36 |
+
def generate_cfdi_plot(input_cfdi, compute_type="paper"):
    """
    Function to generate a plot for CFDI.

    Draws a KDE of the corpus CFDI distribution (rows with
    incoming_diversity > 0 from papers_df, loaded at import time) and
    highlights both the corpus average and the queried value.

    Args:
        input_cfdi (float): CFDI value to mark on the curve.
        compute_type (str): Label for the highlighted point
            ("paper", "author", or "proceedings").

    Returns:
        matplotlib.figure.Figure: The finished figure.
    """
    # Using kdeplot to fill the distribution curve
    sns.set(font_scale=1.3, style="whitegrid")

    # Zero-diversity rows are excluded from the reference distribution.
    data = papers_df[papers_df["incoming_diversity"] > 0]["incoming_diversity"]
    kde = gaussian_kde(data)
    x_vals = np.linspace(data.min(), data.max(), 1000)
    y_vals = kde.evaluate(x_vals)

    fig, ax = plt.subplots()  # create a new figure and axis

    ax.fill_between(x_vals, y_vals, color="skyblue", alpha=0.3)
    ax.plot(x_vals, y_vals, color="skyblue", linewidth=2, label="Distribution")

    # Red star + dashed stem marking where the queried CFDI falls.
    interpolated_y_cfdi = np.interp(input_cfdi, x_vals, y_vals)
    ax.scatter(
        input_cfdi,
        interpolated_y_cfdi,
        c="r",
        marker="*",
        linewidths=2,
        zorder=2,
        s=32,
    )
    ax.vlines(
        input_cfdi,
        0,
        interpolated_y_cfdi,
        color="tomato",
        ls="--",
        lw=1.5,
    )

    # Small offset keeps the text labels clear of the markers.
    epsilon = 0.005
    # Compute the average and plot it as a light grey vertical line
    mean_val = np.mean(data)
    # Interpolate the y value for the mean
    interpolated_y_mean = np.interp(mean_val, x_vals, y_vals)

    ax.vlines(mean_val, 0, interpolated_y_mean, color="grey", ls="--", lw=1.5)
    ax.text(
        mean_val + epsilon,
        interpolated_y_mean + epsilon,
        "Avg.",
        {"color": "grey", "fontsize": 13},
        ha="left",  # Horizontal alignment
    )
    ax.text(
        input_cfdi + epsilon,
        interpolated_y_cfdi + epsilon,
        f"This {compute_type}",
        {"color": "#DC143C", "fontsize": 13},
        ha="left",  # Horizontal alignment
    )

    ax.set_xlabel("Citation Field Diversity Index (CFDI)", fontsize=15)
    ax.set_ylabel("Density", fontsize=15)
    sns.despine(left=True, bottom=True, right=True, top=True)

    return fig
|
99 |
+
|
100 |
+
|
101 |
+
def generate_maoc_plot(input_maoc, compute_type="paper"):
    """Render the corpus mAoC density curve with *input_maoc* highlighted.

    Args:
        input_maoc (float): Mean age of citation to mark on the curve.
        compute_type (str): Label for the highlighted point
            ("paper", "author", or "proceedings").

    Returns:
        matplotlib.figure.Figure: The finished figure.
    """
    sns.set(font_scale=1.3, style="whitegrid")

    # KDE over the corpus-wide mean citation ages loaded at import time.
    ages = pd.DataFrame(mean_citation_ages)[0]
    density = gaussian_kde(ages)
    xs = np.linspace(ages.min(), ages.max(), 1000)
    ys = density.evaluate(xs)

    fig, ax = plt.subplots()
    ax.fill_between(xs, ys, color="skyblue", alpha=0.3)
    ax.plot(xs, ys, color="skyblue", linewidth=2, label="Distribution")

    # Red star + dashed stem where the queried value falls on the curve.
    y_at_input = np.interp(input_maoc, xs, ys)
    ax.scatter(
        input_maoc,
        y_at_input,
        c="r",
        marker="*",
        linewidths=2,
        zorder=2,
        s=32,
    )
    ax.vlines(input_maoc, 0, y_at_input, color="tomato", ls="--", lw=1.5)

    # Grey dashed line marks the corpus average for comparison.
    avg = np.mean(ages)
    y_at_avg = np.interp(avg, xs, ys)
    ax.vlines(avg, 0, y_at_avg, color="grey", ls="--", lw=1.5)

    # Small offset keeps the annotations clear of the markers.
    offset = 0.005
    ax.text(
        avg + offset,
        y_at_avg + offset,
        "Avg.",
        {"color": "grey", "fontsize": 13},
        ha="left",
    )
    ax.text(
        input_maoc + offset,
        y_at_input + offset,
        f"This {compute_type}",
        {"color": "#DC143C", "fontsize": 13},
        ha="left",
    )

    ax.set_xlabel("Mean Age of Citation (mAoC)", fontsize=15)
    ax.set_ylabel("Density", fontsize=15)
    sns.despine(left=True, bottom=True, right=True, top=True)

    return fig
|
requirements.txt
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio==3.44.3
|
2 |
+
numpy==1.25.2
|
3 |
+
matplotlib==3.8.0
|
4 |
+
requests==2.31.0
|
5 |
+
futures
|
6 |
+
seaborn==0.12.2
|
7 |
+
scipy==1.11.2
|
8 |
+
beautifulsoup4==4.12.2
|
9 |
+
aiohttp==3.8.5
|
10 |
+
asyncio
|
s2.py
ADDED
@@ -0,0 +1,499 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
|
5 |
+
import asyncio
|
6 |
+
import datetime
|
7 |
+
import os
|
8 |
+
from collections import Counter
|
9 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
10 |
+
from typing import List, Tuple
|
11 |
+
|
12 |
+
import aiohttp
|
13 |
+
import requests
|
14 |
+
|
15 |
+
from aclanthology import (
|
16 |
+
async_match_acl_id_to_s2_paper,
|
17 |
+
extract_author_info,
|
18 |
+
extract_paper_info,
|
19 |
+
extract_venue_info,
|
20 |
+
)
|
21 |
+
from metrics import calculate_gini, calculate_gini_simpson
|
22 |
+
from pdf import parse_pdf_to_artcile_dict
|
23 |
+
|
24 |
+
|
25 |
+
def get_or_create_eventloop():
    """
    Return the current asyncio event loop, creating one if the thread has none.

    Returns:
        asyncio.AbstractEventLoop: The current (or newly created) event loop.

    Raises:
        RuntimeError: Re-raised when the failure is anything other than the
            "no current event loop" condition. (The original silently fell
            through and returned None in that case, hiding the error.)
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError as ex:
        if "There is no current event loop in thread" in str(ex):
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            return loop
        # Unexpected RuntimeError: propagate instead of implicitly
        # returning None.
        raise
40 |
+
|
41 |
+
def send_s2_request(request_url):
    """
    Perform a GET request against the Semantic Scholar API.

    The API key is read from the ``s2apikey`` environment variable and sent
    in the ``x-api-key`` header. A KeyError is raised when the variable is
    unset, which surfaces misconfiguration early.

    Args:
        request_url (str): Fully-qualified URL to request.

    Returns:
        requests.Response: The raw response object.
    """
    headers = {"x-api-key": os.environ["s2apikey"]}
    return requests.get(request_url, headers=headers, timeout=10)
57 |
+
|
58 |
+
def check_s2_id_type(semantic_scholar_id):
    """
    Determine whether a Semantic Scholar ID refers to a paper or an author.

    Args:
        semantic_scholar_id (str): The Semantic Scholar ID to probe.

    Returns:
        tuple: ``("paper", None)`` when the ID resolves to a paper;
            ``("author", <name>)`` when it resolves to an author;
            ``("author", "invalid")`` when it resolves to neither.
    """
    base_url = "https://api.semanticscholar.org/v1"

    # Probe the paper endpoint first; HTTP 200 means a valid paper ID.
    paper_response = requests.get(
        f"{base_url}/paper/{semantic_scholar_id}", timeout=5
    )
    if paper_response.status_code == 200:
        return "paper", None

    # Fall back to the author endpoint.
    author_response = requests.get(
        f"{base_url}/author/{semantic_scholar_id}", timeout=5
    )
    if author_response.status_code == 200:
        return "author", author_response.json()["name"]
    return "author", "invalid"
94 |
+
|
95 |
+
def get_papers_from_author(ssid_author_id):
    """
    Retrieve the Semantic Scholar paper IDs of all papers by an author.

    Args:
        ssid_author_id (str): Semantic Scholar author ID.

    Returns:
        list: Paper IDs for the author, or an empty list on request failure.
    """
    request_url = (
        "https://api.semanticscholar.org/graph/v1/author/"
        f"{ssid_author_id}?fields=papers"
    )

    response = send_s2_request(request_url)
    if response.status_code != 200:
        return []
    return [entry["paperId"] for entry in response.json().get("papers", [])]
113 |
+
|
114 |
+
def compute_stats_for_references(s2_ref_paper_keys, year):
    """
    Computes various statistics for a list of reference paper keys.

    Args:
        s2_ref_paper_keys (list): A list of Semantic Scholar paper keys for the references.
        year (int): The year of the paper.

    Returns:
        tuple: A tuple containing the following statistics:
            - num_references (int): The number of references.
            - fields_of_study_counts (dict): A dictionary containing the count of each field of study.
            - year_to_title_dict (dict): A dictionary mapping the year of each reference to its title.
            - cfdi (float): The CFDI (Cumulative Field Diversity Index) of the references.
            - cadi (float): The CADI (Cumulative Age Diversity Index) of the references.
            - output_maoc (float): The MAOC (Mean Age of Citation) of the references.

        If there are no valid references, returns a tuple of None values.
    """

    # Go over the references of the paper
    reference_year_list = []
    reference_title_list = []
    reference_fos_list = []
    # Fetch metadata for every reference concurrently (one request each).
    with ThreadPoolExecutor() as executor:
        request_url_refs = [
            f"https://api.semanticscholar.org/graph/v1/paper/{ref_paper_key}?fields=title,year,s2FieldsOfStudy"
            for ref_paper_key in s2_ref_paper_keys
        ]
        futures = [
            executor.submit(send_s2_request, request_url_ref)
            for request_url_ref in request_url_refs
        ]
        # NOTE(review): as_completed yields in completion order, so the
        # collected lists are not aligned with the input key order.
        for future in as_completed(futures):
            r_ref = future.result()
            if r_ref.status_code == 200:
                result_ref = r_ref.json()
                (title_ref, year_ref, fields_ref) = (
                    result_ref["title"],
                    result_ref["year"],
                    result_ref["s2FieldsOfStudy"],
                )
                reference_year_list.append(year_ref)
                reference_title_list.append(title_ref)
                # Only keep field labels predicted by the s2-fos-model source.
                reference_fos_list.extend(
                    field["category"]
                    for field in fields_ref
                    if field["source"] == "s2-fos-model"
                )
            else:
                print(
                    f"Error retrieving reference {r_ref.status_code} for"
                    f" paper {s2_ref_paper_keys}"
                )

    # Remove all None from reference_year_list and reference_title_list
    # NOTE(review): the two lists are filtered independently, so after this
    # step their elements may no longer be pairwise aligned.
    reference_year_list = [
        year_ref for year_ref in reference_year_list if year_ref is not None
    ]
    reference_title_list = [
        title_ref
        for title_ref in reference_title_list
        if title_ref is not None
    ]

    # Count references
    num_references = len(reference_year_list)

    # Flatten list and count occurrences
    # Computer Science entries are excluded so the counts reflect
    # citations into *other* fields of study.
    fields_of_study_counts = dict(
        Counter(
            [
                field
                for field in reference_fos_list
                if "Computer Science" not in field
            ]
        )
    )

    # Citation age list
    # Age = citing-paper year minus reference year; entries with a falsy
    # year on either side are skipped.
    aoc_list = [
        year - year_ref
        for year_ref in reference_year_list
        if year_ref and year
    ]
    if not aoc_list:
        return None, None, None, None, None, None

    # Compute citation age
    output_maoc = sum(aoc_list) / len(aoc_list)
    cadi = calculate_gini(aoc_list)

    # Create a dictionary of year to title
    # NOTE(review): zip pairs positionally and duplicate years keep only the
    # last title seen for that year.
    year_to_title_dict = dict(zip(reference_year_list, reference_title_list))

    # Compute CFDI
    cfdi = calculate_gini_simpson(fields_of_study_counts)

    # Return the results
    return (
        num_references,
        fields_of_study_counts,
        year_to_title_dict,
        cfdi,
        cadi,
        output_maoc,
    )
222 |
+
|
223 |
+
def compute_stats_for_s2_paper(ssid_paper_id):
    """
    Computes statistics for a given paper ID using the Semantic Scholar API.

    Args:
        ssid_paper_id (str): The Semantic Scholar ID of the paper.

    Returns:
        tuple: A 7-tuple ``(title_authors, num_references,
        fields_of_study_counts, year_to_title_dict, cfdi, cadi,
        output_maoc)``. All elements are None when the paper cannot be
        retrieved or has no references. Note the statistics elements can be
        None even when ``title_authors`` is set (no datable references).
    """
    # Get the paper, its references, and basic metadata in one request.
    request_url = f"https://api.semanticscholar.org/graph/v1/paper/{ssid_paper_id}?fields=references,title,year,authors"
    r = send_s2_request(request_url)

    # Bug fix: the original returned an 8-tuple of None for "no references"
    # and fell through (implicit bare None) on HTTP errors, while the
    # success path returns a 7-tuple. All paths now return 7 values.
    if r.status_code != 200:
        return None, None, None, None, None, None, None

    result = r.json()
    if not result.get("references"):
        return None, None, None, None, None, None, None

    # Some reference entries carry no paperId; drop them before lookup.
    s2_ref_paper_keys = [
        reference_paper_tuple["paperId"]
        for reference_paper_tuple in result["references"]
    ]
    filtered_s2_ref_paper_keys = [
        s2_ref_paper_key
        for s2_ref_paper_key in s2_ref_paper_keys
        if s2_ref_paper_key is not None
    ]

    title, year, authors = (
        result["title"],
        result["year"],
        result["authors"],
    )
    title_authors = (
        title + "\n" + ", ".join([author["name"] for author in authors])
    )

    (
        num_references,
        fields_of_study_counts,
        year_to_title_dict,
        cfdi,
        cadi,
        output_maoc,
    ) = compute_stats_for_references(filtered_s2_ref_paper_keys, year)

    # Return the results
    return (
        title_authors,
        num_references,
        fields_of_study_counts,
        year_to_title_dict,
        cfdi,
        cadi,
        output_maoc,
    )
285 |
+
|
286 |
+
def compute_stats_for_s2_author(ssid_author_id, author_name):
    """
    Compute aggregate statistics over all papers of a Semantic Scholar author.

    Args:
        ssid_author_id (str): The Semantic Scholar author ID.
        author_name (str): Display name used to label the aggregated result.

    Returns:
        tuple | None: Aggregated statistics for the author's papers, or
        None when no papers are found.
    """
    papers = get_papers_from_author(ssid_author_id)
    if not papers:
        return None
    return compute_stats_for_multiple_s2_papers(papers, author_name)
301 |
+
|
302 |
+
def compute_stats_for_acl_paper(url):
    """
    Computes statistics for a paper based on its ACL Anthology URL.

    Args:
        url (str): The URL of the paper on the ACL Anthology website.

    Returns:
        tuple | None: Statistics for the paper, or None if the paper could
        not be found on the ACL Anthology or matched on Semantic Scholar.
    """
    if paper_info := extract_paper_info(url):
        loop = get_or_create_eventloop()
        # Match paper ID to Semantic Scholar ID
        s2_paper = loop.run_until_complete(
            async_match_acl_id_to_s2_paper(paper_info["acl_id"])
        )
        # Robustness fix: the match can fail (None, or a record without a
        # paperId); the original crashed with TypeError/KeyError here.
        if not s2_paper or "paperId" not in s2_paper:
            return None
        return compute_stats_for_s2_paper(s2_paper["paperId"])
    return None
321 |
+
|
322 |
+
def compute_stats_for_acl_author(url):
    """
    Computes statistics for an author's papers in the ACL Anthology.

    Args:
        url (str): The URL of the author's page on the ACL Anthology website.

    Returns:
        tuple | None: Aggregated statistics over the author's papers, or
        None when the author page cannot be parsed.
    """
    if paper_info := extract_author_info(url):
        loop = get_or_create_eventloop()
        # The ACL ID is taken as the second-to-last URL path segment
        # (anthology URLs end with a trailing slash) — TODO confirm this
        # holds for every listed paper.
        tasks = [
            async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
            for paper in paper_info["papers"]
        ]
        papers = loop.run_until_complete(asyncio.gather(*tasks))
        # Robustness fix: failed matches can come back as None; the original
        # raised TypeError on `"paperId" in None`.
        return compute_stats_for_multiple_s2_papers(
            [
                paper["paperId"]
                for paper in papers
                if paper and "paperId" in paper
            ],
            paper_info["author"],
        )
    return None
347 |
+
|
348 |
+
def compute_stats_for_acl_venue(url):
    """
    Computes statistics for papers in a given ACL venue.

    Args:
        url (str): The URL of the ACL venue.

    Returns:
        tuple | None: Aggregated statistics over the venue's papers, or
        None when the venue page cannot be parsed.
    """
    if paper_info := extract_venue_info(url):
        loop = get_or_create_eventloop()
        # The ACL ID is taken as the second-to-last URL path segment
        # (anthology URLs end with a trailing slash) — TODO confirm this
        # holds for every listed paper.
        tasks = [
            async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
            for paper in paper_info["papers"]
        ]
        papers = loop.run_until_complete(asyncio.gather(*tasks))
        # Robustness fix: failed matches can come back as None; the original
        # raised TypeError on `"paperId" in None`.
        return compute_stats_for_multiple_s2_papers(
            [
                paper["paperId"]
                for paper in papers
                if paper and "paperId" in paper
            ],
            paper_info["venue"],
        )
    return None
371 |
+
|
372 |
+
def compute_stats_for_multiple_s2_papers(
|
373 |
+
papers: List[dict], title: str
|
374 |
+
) -> Tuple[str, int, dict, dict, float, float, float]:
|
375 |
+
"""
|
376 |
+
Computes statistics for multiple S2 papers.
|
377 |
+
|
378 |
+
Args:
|
379 |
+
papers (List[dict]): A list of S2 papers.
|
380 |
+
title (str): The title of the papers.
|
381 |
+
|
382 |
+
Returns:
|
383 |
+
A tuple containing the following statistics:
|
384 |
+
- title (str): The title of the papers.
|
385 |
+
- num_references (int): The total number of references in all papers.
|
386 |
+
- top_fields (dict): A dictionary containing the top fields and their counts.
|
387 |
+
- oldest_paper_dict (dict): A dictionary containing the oldest paper for each year.
|
388 |
+
- cfdi (float): The average CFDI score for all papers.
|
389 |
+
- cadi (float): The average CADI score for all papers.
|
390 |
+
- output_maoc (float): The average output MAOC score for all papers.
|
391 |
+
"""
|
392 |
+
num_references = 0
|
393 |
+
top_fields = {}
|
394 |
+
oldest_paper_dict = {}
|
395 |
+
cfdi = 0
|
396 |
+
cadi = 0
|
397 |
+
output_maoc = 0
|
398 |
+
|
399 |
+
def process_paper(paper):
|
400 |
+
return compute_stats_for_s2_paper(paper)
|
401 |
+
|
402 |
+
with ThreadPoolExecutor() as executor:
|
403 |
+
results_list = list(executor.map(process_paper, papers))
|
404 |
+
|
405 |
+
for results in results_list:
|
406 |
+
if not results or results[0] is None:
|
407 |
+
continue
|
408 |
+
num_references += results[1]
|
409 |
+
for field, count in results[2].items():
|
410 |
+
top_fields[field] = top_fields.get(field, 0) + count
|
411 |
+
for year, ref_title in results[3].items():
|
412 |
+
oldest_paper_dict[year] = ref_title
|
413 |
+
cfdi += results[4]
|
414 |
+
cadi += results[5]
|
415 |
+
output_maoc += results[6]
|
416 |
+
|
417 |
+
return (
|
418 |
+
title,
|
419 |
+
num_references,
|
420 |
+
top_fields,
|
421 |
+
oldest_paper_dict,
|
422 |
+
cfdi / len(papers),
|
423 |
+
cadi / len(papers),
|
424 |
+
output_maoc / len(papers),
|
425 |
+
)
|
426 |
+
|
427 |
+
|
428 |
+
async def send_s2_async_request(url):
    """
    Fetch *url* asynchronously and return the decoded JSON payload.

    Args:
        url (str): The URL to send the request to.

    Returns:
        dict: Parsed JSON body of the response.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            payload = await response.json()
    return payload
442 |
+
|
443 |
+
async def match_title_to_s2_paper(title, authors=None):
    """
    Match a paper title to Semantic Scholar and return its S2 paper ID.

    Args:
        title (str): The title of the paper.
        authors (List[str], optional): Author names. Currently unused; kept
            for interface compatibility. Defaults to None.

    Returns:
        str or None: The S2 paper ID of the top-ranked match, otherwise None.
    """
    from urllib.parse import quote

    # Bug fix: the title is PDF-derived free text and may contain spaces,
    # '&', '#', '?', etc.; embedding it raw corrupted the query string.
    search_url = (
        "http://api.semanticscholar.org/graph/v1/paper/search"
        f"?query={quote(title)}"
    )

    # Send request
    response = await send_s2_async_request(search_url)

    results = response.get("data", [])
    if results:
        # Results are ranked by relevance; take the top hit.
        return results[0].get("paperId")
    return None
467 |
+
|
468 |
+
async def compute_stats_for_pdf(pdf_file):
    """
    Compute citation statistics for an uploaded PDF file.

    Args:
        pdf_file (file): File-like object whose ``.name`` is the PDF path.

    Returns:
        tuple: The article title followed by the reference statistics
        produced by ``compute_stats_for_references``.
    """
    article_dict = parse_pdf_to_artcile_dict(pdf_file.name)
    references = article_dict["references"]

    # Resolve every titled reference to an S2 paper ID concurrently.
    lookups = [
        match_title_to_s2_paper(reference["title"], reference["authors"])
        for reference in references
        if reference["title"]
    ]
    resolved_ids = await asyncio.gather(*lookups)

    # Drop references that could not be matched.
    s2_paper_ids = [s2_id for s2_id in resolved_ids if s2_id is not None]

    # Citation ages are measured relative to the current year.
    current_year = datetime.date.today().year

    stats = compute_stats_for_references(s2_paper_ids, current_year)
    return (article_dict["title"],) + stats
|