Modalities: Image, Text · Libraries: Datasets
Yassine committed
Commit 431c287 · 1 parent: babc7f9

2k19 release
Files changed (11)
  1. Chunk_1.zip +3 -0
  2. Chunk_10.zip +3 -0
  3. Chunk_2.zip +3 -0
  4. Chunk_3.zip +3 -0
  5. Chunk_4.zip +3 -0
  6. Chunk_5.zip +3 -0
  7. Chunk_6.zip +3 -0
  8. Chunk_7.zip +3 -0
  9. Chunk_8.zip +3 -0
  10. Chunk_9.zip +3 -0
  11. comma2k19.py +48 -0
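
The ten Chunk_*.zip archives are stored as Git LFS objects, so a single chunk can be fetched on its own instead of cloning the whole repository. A minimal sketch using huggingface_hub; the repo id "commaai/comma2k19" is an assumption, not confirmed by this commit (note that the loader script below resolves its URLs against commaai/commavq):

# Sketch: download one chunk directly, without git.
# repo_id is assumed; adjust it to wherever this dataset is hosted.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="commaai/comma2k19",
    repo_type="dataset",
    filename="Chunk_1.zip",
)
print(local_path)  # cached local path of the ~8.7 GB archive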
Chunk_1.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7f18b77ca5980e36d536344995bcfaffd4cbe6b84fe43ca443ea20c26547e30
+size 8731252405
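
Each archive appears in the diff as a three-line Git LFS pointer: the spec version, the SHA-256 of the real object (oid), and its size in bytes. A small sketch for verifying a downloaded chunk against the pointer above; the local path is hypothetical:

# Sketch: check a local copy of Chunk_1.zip against its LFS pointer.
import hashlib
import os

EXPECTED_OID = "b7f18b77ca5980e36d536344995bcfaffd4cbe6b84fe43ca443ea20c26547e30"
EXPECTED_SIZE = 8731252405
path = "Chunk_1.zip"  # hypothetical local path

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for block in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB blocks
        sha.update(block)
assert sha.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("Chunk_1.zip matches its pointer")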
Chunk_10.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83228bc8ae378561e4bf8b5afdab35a55611fa5961c34a32891b848a2c98044d
+size 9901342289
Chunk_2.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a3f1272e3cce07cc7d9171e68311816aae4c13e70d010d1f0a2ec084abc3c45
+size 9054474112
Chunk_3.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09f959b9ea2d98087ba84e8d356c7833bbc0a8b39de7f1878cf592caa48de636
+size 9412088454
Chunk_4.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36a71a1aa2b4d78070ef6ed4edb1422f6d79362a88857423fa01ad8d6b2996c7
+size 9490564706
Chunk_5.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8073883bff6d2c3159f24b9a82c259de729a5f7b2ff5da667104d6b5bcde8dd4
+size 9812054419
Chunk_6.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:355024fb361a8b50f6925a037d12d2e6f90934d8ed056dfff5e131c82889c497
+size 9526597274
Chunk_7.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc41711118d70eb924a417fa5f5f4856bb3e20833680d374aaec4b27683ab865
+size 9286503359
Chunk_8.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b7dd599681895815c8666838df580d37ae408a72bdc1fabe72e3e276ec8c323
+size 9634728776
Chunk_9.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7012bf0b16f2cda8385c87e3556f92c940ea451d8b809683ccf1b31828987f6
+size 9773161870
comma2k19.py ADDED
@@ -0,0 +1,48 @@
+import datasets
+import glob
+import os
+
+NUM_SHARDS = 10
+_URLS = [
+    f'https://huggingface.co/datasets/commaai/commavq/resolve/main/Chunk_{i}.zip' for i in range(1, NUM_SHARDS + 1)
+]
+
+_DESCRIPTION = """\
+comma2k19 is a dataset of over 33 hours of commute on California's Highway 280.
+It comprises 2019 segments, each 1 minute long, recorded on a 20 km stretch of highway between San Jose and San Francisco.
+comma2k19 is a fully reproducible and scalable dataset.
+The data was collected with comma EONs, which have sensors similar to those of any modern smartphone, including a road-facing camera, phone GPS, thermometers and a 9-axis IMU.
+Additionally, the EON captures raw GNSS measurements and all CAN data sent by the car, via a comma grey panda.
+"""
+
+class Comma2k19(datasets.GeneratorBasedBuilder):
+
+    def _info(self):
+        # Each example is just the path to one file extracted from a chunk archive.
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {"path": datasets.Value("string")}
+            )
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns one SplitGenerator per downloaded chunk."""
+        dl_manager.download_config.ignore_url_params = True
+        downloaded_files = dl_manager.download(_URLS)
+        # In streaming mode the archives are never extracted to disk.
+        local_extracted_archive = dl_manager.extract(downloaded_files) if not dl_manager.is_streaming else None
+        return [
+            datasets.SplitGenerator(
+                name=str(i),
+                gen_kwargs={
+                    "local_extracted_archive": local_extracted_archive[i] if local_extracted_archive else None,
+                    "files": dl_manager.iter_archive(downloaded_files[i]),
+                },
+            ) for i in range(len(downloaded_files))]
+
+    def _generate_examples(self, local_extracted_archive, files):
+        # Non-streaming path: walk the extracted archive on disk.
+        for path in glob.glob(os.path.join(local_extracted_archive, '**', '*'), recursive=True):
+            if os.path.isfile(path):
+                file_name = os.path.basename(path)
+                yield file_name, {'path': path}
+
+    def _get_examples_iterable_for_split(self, split_generator):
+        # Streaming path: iter_archive yields (path_within_archive, file_object) tuples.
+        for path in split_generator.gen_kwargs['files']:
+            yield path[0], {'path': path[0]}
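
For completeness, a usage sketch for the loader above: splits are named by chunk index ("0" through "9"), and each example carries the path of one extracted file. The repo id is again an assumption, and recent versions of datasets may require trust_remote_code=True to run a script-based loader:

# Sketch: load the first chunk's file paths through the script above.
from datasets import load_dataset

ds = load_dataset("commaai/comma2k19", split="0")  # assumed repo id
print(ds[0]["path"])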