Fix `license` metadata (#1)
opened by julien-c (HF Staff)

Files changed:
- README.md: +4 -4
- code_clippy_github.py: +37 -32
README.md CHANGED

@@ -3,9 +3,9 @@ annotations_creators: []
 language_creators:
 - crowdsourced
 - expert-generated
-
-
-
+languages: ["code"]
+licenses:
+- other-multiple
 multilinguality:
 - multilingual
 pretty_name: code-clippy-github-code
@@ -173,4 +173,4 @@ The paper ["Evaluating Large Language Models Trained on Code"](https://arxiv.org
 - The query was executed on _February 1, 2022, 12:15:59 AM EST_
 
 ## Acknowledgements
-This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/about/). We would also like to thank [Dr. Razvan Bunescu](https://webpages.charlotte.edu/rbunescu/) and [The College of Computing and Informatics at UNC Charlotte](https://cci.charlotte.edu/) for their generous contributions to this project, specifically in funding the BigQuery and Google Cloud Storage costs.
+This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/about/). We would also like to thank [Dr. Razvan Bunescu](https://webpages.charlotte.edu/rbunescu/) and [The College of Computing and Informatics at UNC Charlotte](https://cci.charlotte.edu/) for their generous contributions to this project, specifically in funding the BigQuery and Google Cloud Storage costs.
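The block edited in the first hunk is the YAML front matter of the dataset card. A quick way to sanity-check the result is to load the card through `huggingface_hub`; this is a minimal sketch, assuming a recent `huggingface_hub` and assuming the repo id is `CodedotAI/code_clippy_github` (the id is not shown in this diff):

```python
from huggingface_hub import DatasetCard

# Assumed repo id; the diff itself does not name the repository.
card = DatasetCard.load("CodedotAI/code_clippy_github")

# card.data is the parsed YAML front matter edited above.
metadata = card.data.to_dict()
print(metadata.get("languages"))  # expected after this PR: ['code']
print(metadata.get("licenses"))   # expected after this PR: ['other-multiple']
```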
code_clippy_github.py CHANGED

@@ -20,7 +20,6 @@ import os
 
 
 import datasets
-from datasets.download.streaming_download_manager import xopen
 from huggingface_hub import HfApi, HfFolder
 from datasets.data_files import DataFilesDict
 
@@ -157,12 +156,19 @@ class CodeClippyGithub(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
-
-
-
-
-
+
+        hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
+            _REPO_NAME,
+            timeout=100.0,
+        )
+
+        patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
+        data_files = datasets.data_files.DataFilesDict.from_hf_repo(
+            patterns,
+            dataset_info=hfh_dataset_info,
+        )
+
+        files = dl_manager.download_and_extract(data_files["train"])
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
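`get_patterns_in_dataset_repository` and `DataFilesDict.from_hf_repo` are internal `datasets` helpers, so their exact signatures track the installed version. The effect of the new `_split_generators` can be approximated with the public Hub API; a rough sketch, where the repo id is an assumed stand-in for the script's `_REPO_NAME` constant:

```python
from huggingface_hub import HfApi

# Assumed value for the script's _REPO_NAME constant.
REPO_NAME = "CodedotAI/code_clippy_github"

api = HfApi()
# Enumerate the files actually present in the dataset repo, then keep the
# gzipped JSON Lines shards that _generate_examples expects to receive.
shards = [
    f for f in api.list_repo_files(REPO_NAME, repo_type="dataset")
    if f.endswith(".jsonl.gz")  # assumed shard extension
]
print(f"found {len(shards)} candidate train shards")
```

Resolving the file list from the Hub at load time, rather than hardcoding URLs, appears to be the point of this change: the script keeps working as shards are added to or renamed in the repo.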
@@ -175,31 +181,30 @@ class CodeClippyGithub(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, files):
         key = 0
         for file_idx, file in enumerate(files):
-            with
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            key += 1
+            with gzip.open(file, "rb") as f:
+
+                uncompressed_data = f.readlines()
+
+                for batch_idx, code_base in enumerate(uncompressed_data):
+                    j_dict = json.loads(code_base.decode('utf-8'))
+
+
+
+                    lang = lang_from_name(j_dict['path'])
+                    license = j_dict["license"]
+
+                    if self.config.filter_languages and not lang in self.config.languages:
+                        continue
+                    if self.config.filter_licenses and not license in self.config.licenses:
+                        continue
+                    # TODO: Add more features like header comments, filename, and other features useful in a prompt.
+                    yield key, {"code_text": j_dict['content'],
+                                "repo_name": j_dict['repo_name'],
+                                "file_path": j_dict['path'],
+                                "license": license,
+                                "language": lang,
+                                "size": int(j_dict['f0_'])}
+                    key += 1
 
 
 def lang_from_name(name):
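Per this hunk, each downloaded shard is a gzipped JSON Lines file whose rows carry the BigQuery columns read above (`content`, `repo_name`, `path`, `license`, and the auto-named size column `f0_`). Below is a self-contained sketch of the same per-row parsing, with a placeholder filename and a crude extension-based stand-in for the script's `lang_from_name` helper:

```python
import gzip
import json

def parse_shard(path: str):
    """Yield examples from one gzipped JSONL shard (placeholder path)."""
    with gzip.open(path, "rb") as f:
        # Iterating the file object streams line by line, unlike the
        # readlines() call in the diff, which buffers the whole shard.
        for line in f:
            row = json.loads(line.decode("utf-8"))
            yield {
                "code_text": row["content"],
                "repo_name": row["repo_name"],
                "file_path": row["path"],
                "license": row["license"],
                # Crude stand-in for the script's lang_from_name helper.
                "language": row["path"].rsplit(".", 1)[-1],
                "size": int(row["f0_"]),  # BigQuery auto-generated column name
            }

for example in parse_shard("data/train-00000.jsonl.gz"):  # placeholder name
    print(example["repo_name"], example["language"], example["size"])
    break
```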