def to_parquet(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
storage_options: Optional[dict] = None,
**parquet_writer_kwargs,
) -> int:
"""Exports the dataset to parquet
Args:
path_or_buf (`PathLike` or `BinaryIO`):
Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`),
or a binary file object, where the dataset will be saved in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**parquet_writer_kwargs (additional keyword arguments):
Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
Returns:
`int`: The number of bytes written.
Example:
```py
>>> ds.to_parquet("path/to/dataset/directory")
```
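A minimal sketch of writing to an in-memory buffer instead of a path (assuming `ds` is an existing `Dataset`):
```py
>>> from io import BytesIO
>>> buffer = BytesIO()
>>> num_bytes = ds.to_parquet(buffer)
```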
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetWriter
return ParquetDatasetWriter(
self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs
).write()
def to_sql(
self,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
**sql_writer_kwargs,
) -> int:
"""Exports the dataset to a SQL database. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
name (`str`):
Name of SQL table.
con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
**sql_writer_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
<Changed version="2.11.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of records written.
Example:
```py
>>> # con provided as a connection URI string
>>> ds.to_sql("data", "sqlite:///my_own_db.sql")
>>> # con provided as a sqlite3 connection object
>>> import sqlite3
>>> con = sqlite3.connect("my_own_db.sql")
>>> with con:
... ds.to_sql("data", con)
```
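A minimal sketch using an SQLAlchemy engine as `con` instead (assuming `ds` is an existing `Dataset` and `sqlalchemy` is installed):
```py
>>> from sqlalchemy import create_engine
>>> engine = create_engine("sqlite:///my_own_db.sql")
>>> ds.to_sql("data", engine)
```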
"""
# Dynamic import to avoid circular dependency
from .io.sql import SqlDatasetWriter
return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
def _estimate_nbytes(self) -> int:
dataset_nbytes = self.data.nbytes
# Find decodable columns, because if there are any, we need to
# adjust the dataset size computation (needed for sharding) to account for possible external files
decodable_columns = [
k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
]
if decodable_columns:
# Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
extra_nbytes = 0
def extra_nbytes_visitor(array, feature):
nonlocal extra_nbytes
if isinstance(feature, (Audio, Image, Video)):
for x in array.to_pylist():
if x is not None and x["bytes"] is None and x["path"] is not None:
size = xgetsize(x["path"])
extra_nbytes += size
extra_nbytes -= array.field("path").nbytes
table = self.with_format("arrow")[:1000]
table_visitor(table, extra_nbytes_visitor)
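# Extrapolate the external-file bytes measured on the first 1000 examples to the full dataset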
extra_nbytes = extra_nbytes * len(self.data) / len(table)
dataset_nbytes = dataset_nbytes + extra_nbytes
if self._indices is not None:
dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
return dataset_nbytes
@staticmethod
def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
for shard_idx, shard in enumerate(shards):
for pa_table in shard.with_format("arrow").iter(batch_size):
yield shard_idx, pa_table
@staticmethod
def _generate_tables_from_cache_file(filename: str):
for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
yield batch_idx, pa.Table.from_batches([batch])
def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
"""Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
To get the best performance, make sure your dataset doesn't have an indices mapping.
If it does, the data is not read contiguously, which can be slow.
You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
Args:
num_shards (`int`, defaults to `1`):
Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
Returns:
[`datasets.IterableDataset`]
Example:
Basic usage:
```python
>>> ids = ds.to_iterable_dataset()
>>> for example in ids:
... pass
```
With lazy filtering and processing:
```python
>>> ids = ds.to_iterable_dataset()
>>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
>>> for example in ids:
... pass
```
With sharding to enable efficient shuffling:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.filter(filter_fn).map(process_fn)
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader and shuffling:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling:
```python
>>> from datasets.distributed import split_dataset_by_node
>>> ids = ds.to_iterable_dataset(num_shards=512)
>>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled list of shards when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
With shuffling and multiple epochs:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> for epoch in range(n_epochs):
... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
... for example in ids:
... pass
```
Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
"""
from .iterable_dataset import ArrowExamplesIterable, IterableDataset
if self._format_type is not None:
raise NotImplementedError(
"Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
)
if num_shards > len(self):
raise ValueError(
f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
)
if self._indices is not None:
logger.info(
"Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
"You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
)
shards = (
[copy.deepcopy(self)]
if num_shards == 1
else [
self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
]
)
ex_iterable = ArrowExamplesIterable(
Dataset._generate_tables_from_shards,
kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
)
return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
def _push_parquet_shards_to_hub(
self,
repo_id: str,
data_dir: str = "data",
split: Optional[str] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
) -> Tuple[List[CommitOperationAdd], int, int]:
"""Pushes the dataset shards as Parquet files to the hub. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Returns:
additions (`List[CommitOperationAdd]`): list of the `CommitOperationAdd` objects for the uploaded shards
uploaded_size (`int`): number of bytes uploaded to the repository
dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
"""
# Find decodable columns, because if there are any, we need to:
# embed the bytes from the files in the shards
decodable_columns = (
[k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
if embed_external_files
else []
)
dataset_nbytes = self._estimate_nbytes()
if num_shards is None:
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
if decodable_columns:
from .io.parquet import get_writer_batch_size
def shards_with_embedded_external_files(shards: Iterator[Dataset]) -> Iterator[Dataset]:
for shard in shards:
format = shard.format
shard = shard.with_format("arrow")
shard = shard.map(
embed_table_storage,
batched=True,
batch_size=get_writer_batch_size(shard.features),
keep_in_memory=True,
)
shard = shard.with_format(**format)
yield shard
shards = shards_with_embedded_external_files(shards)
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
uploaded_size = 0
additions = []
for index, shard in hf_tqdm(
enumerate(shards),
desc="Uploading the dataset shards",
total=num_shards,
):
shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
buffer = BytesIO()
shard.to_parquet(buffer)
uploaded_size += buffer.tell()
shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
api.preupload_lfs_files(
repo_id=repo_id,
additions=[shard_addition],
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
additions.append(shard_addition)
return additions, uploaded_size, dataset_nbytes
def push_to_hub(
self,
repo_id: str,
config_name: str = "default",
set_default: Optional[bool] = None,
split: Optional[str] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
) -> CommitInfo:
"""Pushes the dataset to the hub as a Parquet dataset.
The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
The resulting Parquet files are self-contained by default. If your dataset contains [`Image`], [`Audio`] or [`Video`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to `False`.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`, defaults to "default"):
The configuration name (or subset) of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
split (`str`, *optional*):
The name of the split that will be given to that dataset. Defaults to `self.split`.
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data". | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
a unit (like `"5MB"`).
num_shards (`int`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024)
```
If your dataset has multiple splits (e.g. train/validation/test):
```python
>>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train")
>>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation")
>>> # later
>>> dataset = load_dataset("<organization>/<dataset_id>")
>>> train_dataset = dataset["train"]
>>> val_dataset = dataset["validation"]
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
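If you prefer to review the upload before it lands on the main branch, a minimal sketch using the documented `create_pr` parameter to open a pull request instead of committing directly:
```python
>>> dataset.push_to_hub("<organization>/<dataset_id>", create_pr=True)
```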
"""
if "Video(" in str(self.features):
raise NotImplementedError(
"push_to_hub is not implemented for video datasets, instead you should upload the video files "
"using e.g. the huggingface_hub library and optionally upload a metadata.csv or metadata.jsonl "
"file containing other information like video captions, features or labels. More information "
"at https://huggingface.co/docs/datasets/main/en/video_load#videofolder"
)
if config_name == "data": | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.") | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
if split is None:
split = str(self.split) if self.split is not None else "train"
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
repo_id=repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
max_shard_size=max_shard_size,
num_shards=num_shards,
create_pr=create_pr,
embed_external_files=embed_external_files,
)
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
deletions, deleted_size = [], 0
repo_splits = [] # use a list to keep the order of the splits
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
deleted_size += repo_file.size
elif fnmatch.fnmatch(
repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
):
repo_split = string_to_dict(
repo_file.rfilename,
glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
)["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
info_to_dump = self.info.copy()
info_to_dump.download_checksums = None
info_to_dump.download_size = uploaded_size
info_to_dump.dataset_size = dataset_nbytes
info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict(
{split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
if dataset_infos and config_name in dataset_infos:
repo_info = dataset_infos[config_name]
else:
repo_info = None
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
repo_info = None
# update the total info to dump from existing info
if repo_info is not None:
logger.info("Updating downloaded metadata with the new split.")
if repo_info.splits and list(repo_info.splits) != [split]:
if self._info.features != repo_info.features:
raise ValueError(
f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
)
if split in repo_info.splits:
repo_info.download_size -= deleted_size
repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
repo_info.download_checksums = None
repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
repo_info.splits.pop(split, None)
repo_info.splits[split] = SplitInfo(
split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
)
info_to_dump = repo_info
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
# update the metadata configs
if config_name in metadata_configs:
metadata_config = metadata_configs[config_name]
if "data_files" in metadata_config:
data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
else:
data_files_to_dump = {}
# add the new split
data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
metadata_config_to_dump = {
"data_files": [
{
"split": _split,
"path": _pattern[0] if len(_pattern) == 1 else _pattern,
}
for _split, _pattern in data_files_to_dump.items()
]
}
else:
metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
if set_default and config_name != "default":
if metadata_configs:
default_config_name = metadata_configs.get_default_config_name()
if default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
else:
_ = metadata_configs[default_config_name].pop("default")
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
additions.append(
CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
commit_info = api.create_commit(
repo_id,
operations=additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
else:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
] + (deletions if i == 0 else [])
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
return commit_info
@transmit_format
@fingerprint_transform(inplace=False)
def add_column(
self, name: str, column: Union[list, np.array], new_fingerprint: str, feature: Optional[FeatureType] = None
):
"""Add column to Dataset.
<Added version="1.7"/>
Args:
name (`str`):
Column name.
column (`list` or `np.array`):
Column data to be added.
feature (`FeatureType` or `None`, defaults to `None`):
Column datatype.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> more_text = ds["text"]
>>> ds.add_column(name="text_2", column=more_text)
Dataset({
features: ['text', 'label', 'text_2'],
num_rows: 1066
})
```
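A minimal sketch of passing an explicit `feature` for the new column (the column name and values here are hypothetical):
```py
>>> from datasets import Value
>>> ds.add_column(name="text_length", column=[len(text) for text in ds["text"]], feature=Value("int32"))
```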
""" | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
if feature:
pyarrow_schema = Features({name: feature}).arrow_schema
else:
pyarrow_schema = None
column_table = InMemoryTable.from_pydict({name: column}, schema=pyarrow_schema)
_check_column_names(self._data.column_names + column_table.column_names)
dataset = self.flatten_indices() if self._indices is not None else self
# Concatenate tables horizontally
table = concat_tables([dataset._data, column_table], axis=1)
# Update features
info = dataset.info.copy()
info.features.update(Features.from_arrow_schema(column_table.schema))
table = update_metadata_with_features(table, info.features)
return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
By default the index is built over the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
column (`str`):
The column of the vectors to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
By default it corresponds to `column`.
device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (`faiss.Index`, *optional*):
Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (`int`):
Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
<Added version="2.4.0"/>
train_size (`int`, *optional*):
If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to `False`):
Enable the verbosity of the Faiss index.
dtype (`data-type`):
The dtype of the numpy arrays that are indexed.
Default is `np.float32`.
Example:
```python
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
>>> ds_with_embeddings.add_faiss_index(column='embeddings')
>>> # query
>>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
>>> # save index
>>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> # load index
>>> ds.load_faiss_index('embeddings', 'my_index.faiss')
>>> # query
>>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
```
"""
with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
super().add_faiss_index(
column=column,
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
return self
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
external_arrays (`np.array`):
If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (`faiss.Index`, *optional*):
Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (`int`, *optional*):
Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (`int`, *optional*):
If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False):
Enable the verbosity of the Faiss index.
dtype (`numpy.dtype`):
The dtype of the numpy arrays that are indexed. Default is np.float32.
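Example (a minimal sketch; the embedding arrays are hypothetical and assumed to be precomputed):
```python
>>> import numpy as np
>>> embeddings = np.random.random(size=(100, 512)).astype(np.float32)
>>> ds.add_faiss_index_from_external_arrays(external_arrays=embeddings, index_name='embeddings')
>>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embeddings[0], k=10)
```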
"""
super().add_faiss_index_from_external_arrays(
external_arrays=external_arrays.astype(dtype),
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval. This is done in-place. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
column (`str`):
The column of the documents to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`].
By default it corresponds to `column`.
host (`str`, *optional*, defaults to `localhost`):
Host of where ElasticSearch is running.
port (`int`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_name (`str`, *optional*):
The elasticsearch index name used to create the index.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
Example:
```python
>>> es_client = elasticsearch.Elasticsearch()
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
>>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
```
"""
with self.formatted_as(type=None, columns=[column]):
super().add_elasticsearch_index(
column=column,
index_name=index_name,
host=host,
port=port,
es_client=es_client,
es_index_name=es_index_name,
es_index_config=es_index_config,
)
return self
@transmit_format
@fingerprint_transform(inplace=False)
def add_item(self, item: dict, new_fingerprint: str):
"""Add item to Dataset.
<Added version="1.7"/> | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
item (`dict`):
Item data to be added.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
>>> ds = ds.add_item(new_review)
>>> ds[-1]
{'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
```
"""
item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
# We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
dset_features, item_features = _align_features(
[self._info.features, Features.from_arrow_schema(item_table.schema)]
)
# Cast to align the schemas of the tables and concatenate the tables
table = concat_tables(
[
self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
item_table.cast(item_features.arrow_schema),
]
)
if self._indices is None:
indices_table = None
else:
item_indices_array = pa.array([len(self._data)], type=pa.uint64())
item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
indices_table = concat_tables([self._indices, item_indices_table])
info = self.info.copy()
info.features.update(item_features)
table = update_metadata_with_features(table, info.features)
return Dataset(
table,
info=info,
split=self.split,
indices_table=indices_table,
fingerprint=new_fingerprint,
)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
"""Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
The alignment is done using the lowercase label names.
Args:
label2id (`dict`):
The label name to ID mapping to align the dataset with.
label_column (`str`):
The column name of labels to align on.
Example:
```python
>>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
>>> ds = load_dataset("glue", "mnli", split="train")
>>> # mapping to align with
>>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
>>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
```
"""
# Sanity checks
if label_column not in self._data.column_names:
raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
label_feature = self._info.features[label_column]
if not (
isinstance(label_feature, ClassLabel)
or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
):
raise ValueError(
f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
)
# Sort input mapping by ID value to ensure the label names are aligned
label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
label_names = list(label2id.keys())
# Some label mappings use uppercase label names so we lowercase them during alignment
label2id = {k.lower(): v for k, v in label2id.items()}
int2str_function = (
label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
)
if isinstance(label_feature, ClassLabel):
def process_label_ids(batch):
dset_label_names = [
int2str_function(label_id).lower() if label_id is not None else None
for label_id in batch[label_column]
]
batch[label_column] = [
label2id[label_name] if label_name is not None else None for label_name in dset_label_names
]
return batch
else:
def process_label_ids(batch):
dset_label_names = [
[int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
for seq in batch[label_column]
]
batch[label_column] = [
[label2id[label_name] if label_name is not None else None for label_name in seq]
for seq in dset_label_names
]
return batch
features = self.features
features[label_column] = (
ClassLabel(num_classes=len(label_names), names=label_names)
if isinstance(label_feature, ClassLabel)
else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
)
return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
class NumExamplesMismatchError(Exception):
pass
class Url(str):
pass
class EmptyDatasetError(FileNotFoundError):
pass
class DataFilesList(List[str]):
"""
List of data files (absolute local paths or URLs).
It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
Moreover, DataFilesList has an additional attribute ``origin_metadata``.
It can store:
- the last modified time of local files
- ETag of remote files
- commit sha of a dataset repository
Thanks to this additional attribute, it is possible to hash the list
and get a different hash if and only if at least one file changed.
This is useful for caching Dataset objects that are obtained from a list of data files.
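A minimal usage sketch (the pattern is hypothetical and must resolve to existing files):
```python
>>> data_files = DataFilesList.from_local_or_remote(["data/train-*.csv"])
>>> data_files.origin_metadata  # e.g. last modified times of the resolved local files
```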
"""
def __init__(self, data_files: List[str], origin_metadata: List[SingleOriginMetadata]) -> None:
super().__init__(data_files)
self.origin_metadata = origin_metadata
def __add__(self, other: "DataFilesList") -> "DataFilesList":
return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
@classmethod
def from_hf_repo(
cls,
patterns: List[str],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_local_or_remote(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_patterns(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
data_files = []
for pattern in patterns:
try:
data_files.extend(
resolve_pattern(
pattern,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
except FileNotFoundError:
if not has_magic(pattern):
raise
origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
return cls(data_files, origin_metadata)
def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
pattern = "|".join("\\" + ext for ext in extensions)
pattern = re.compile(f".*({pattern})(\\..+)?$")
return DataFilesList(
[data_file for data_file in self if pattern.match(data_file)],
origin_metadata=self.origin_metadata,
)
class DataFilesDict(Dict[str, DataFilesList]):
"""
Dict of split_name -> list of data files (absolute local paths or URLs).
It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
Moreover, each list is a DataFilesList. It is possible to hash the dictionary
and get a different hash if and only if at least one file changed.
For more info, see [`DataFilesList`].
This is useful for caching Dataset objects that are obtained from a list of data files.
Changing the order of the keys of this dictionary also doesn't change its hash.
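A minimal usage sketch (the patterns are hypothetical and must resolve to existing files):
```python
>>> data_files = DataFilesDict.from_patterns({"train": ["train/*.parquet"], "test": ["test/*.parquet"]})
>>> list(data_files)  # ['train', 'test'], each mapping to a DataFilesList
```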
""" | 9 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py |
@classmethod
def from_local_or_remote(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_local_or_remote(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
@classmethod
def from_hf_repo(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_hf_repo(
patterns_for_key,
dataset_info=dataset_info,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
@classmethod
def from_patterns(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_patterns(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
out = type(self)()
for key, data_files_list in self.items():
out[key] = data_files_list.filter_extensions(extensions)
return out
class DataFilesPatternsList(List[str]):
"""
List of data files patterns (absolute local paths or URLs).
For each pattern there should also be a list of allowed extensions
to keep, or `None` to keep all the files for the pattern.
"""
def __init__(
self,
patterns: List[str],
allowed_extensions: List[Optional[List[str]]],
):
super().__init__(patterns)
self.allowed_extensions = allowed_extensions
def __add__(self, other):
return DataFilesPatternsList([*self, *other], self.allowed_extensions + other.allowed_extensions)
@classmethod
def from_patterns(
cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
) -> "DataFilesPatternsList":
return cls(patterns, [allowed_extensions] * len(patterns))
def resolve(
self,
base_path: str,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
data_files = []
for pattern, allowed_extensions in zip(self, self.allowed_extensions):
try:
data_files.extend(
resolve_pattern(
pattern,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
except FileNotFoundError:
if not has_magic(pattern):
raise
origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
return DataFilesList(data_files, origin_metadata)
def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList":
return DataFilesPatternsList(
self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
)
class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
"""
Dict of split_name -> list of data files patterns (absolute local paths or URLs).
"""
@classmethod
def from_patterns(
cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
) -> "DataFilesPatternsDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesPatternsList)
else DataFilesPatternsList.from_patterns(
patterns_for_key,
allowed_extensions=allowed_extensions,
)
)
return out
def resolve(
self,
base_path: str,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = DataFilesDict()
for key, data_files_patterns_list in self.items():
out[key] = data_files_patterns_list.resolve(base_path, download_config)
return out
def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
out = type(self)()
for key, data_files_patterns_list in self.items():
out[key] = data_files_patterns_list.filter_extensions(extensions)
return out
class _BaseExamplesIterable:
"""Base class for the examples iterable used by an IterableDataset"""
def __init__(self) -> None:
self._state_dict: Optional[Union[list, dict]] = None
def __iter__(self) -> Iterator[Tuple[Key, dict]]:
"""An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
@property
def iter_arrow(self) -> Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]]:
return None
@property
def is_typed(self) -> bool:
return False
@property
def features(self) -> Optional[Features]:
return None
def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
"""
Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
"""
raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "_BaseExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet") | 12 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def split_shard_indices_by_worker(self, num_shards: int, index: int, contiguous=True) -> List[int]:
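# Compute which of this iterable's self.num_shards shards belong to worker `index` out of `num_shards` workers,
# either as one contiguous block or in a round-robin fashion.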
if contiguous:
div = self.num_shards // num_shards
mod = self.num_shards % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
return list(range(start, end))
else:
return list(range(index, self.num_shards, num_shards))
@property
def num_shards(self) -> int:
raise NotImplementedError(f"{type(self)} doesn't implement num_shards yet")
def _init_state_dict(self) -> dict:
raise NotImplementedError(f"{type(self)} doesn't implement _init_state_dict yet") | 12 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
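The contiguous branch above assigns each worker one consecutive block of shard indices, giving the first `mod` workers one extra shard, while the non-contiguous branch deals shards out round-robin. The standalone re-implementation below (for illustration only, not part of the library) makes the arithmetic easy to check:

```py
# Standalone re-implementation of the split arithmetic above, for illustration only.
def split_shard_indices(total_shards: int, num_workers: int, worker_index: int, contiguous: bool = True):
    if contiguous:
        div, mod = divmod(total_shards, num_workers)
        start = div * worker_index + min(worker_index, mod)
        end = start + div + (1 if worker_index < mod else 0)
        return list(range(start, end))
    return list(range(worker_index, total_shards, num_workers))

# 10 underlying shards split across 3 workers:
print([split_shard_indices(10, 3, i) for i in range(3)])
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
print([split_shard_indices(10, 3, i, contiguous=False) for i in range(3)])
# [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
```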
def load_state_dict(self, state_dict: dict) -> dict:
def _inner_load_state_dict(state, new_state):
if new_state is not None and isinstance(state, dict):
for key in new_state:
state[key] = _inner_load_state_dict(state[key], new_state[key])
return state
elif new_state is not None and isinstance(state, list):
for i in range(len(state)):
state[i] = _inner_load_state_dict(state[i], new_state[i])
return state
return new_state
return _inner_load_state_dict(self._state_dict, state_dict)
def state_dict(self) -> dict:
if self._state_dict:
return copy.deepcopy(self._state_dict)
raise RuntimeError("State dict is not initialized, please call ex_iterable._init_state_dict() first.") | 12 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
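`load_state_dict` merges a saved state into the existing `_state_dict` in place, recursing through nested dicts and lists so that the inner containers keep their identity (child iterables holding references to them keep seeing the updates). The toy sketch below re-implements the same merge on made-up state shapes to show that behavior:

```py
# Toy re-implementation of the in-place merge done by load_state_dict (made-up state shapes).
def merge_state(state, new_state):
    if new_state is not None and isinstance(state, dict):
        for key in new_state:
            state[key] = merge_state(state[key], new_state[key])
        return state
    elif new_state is not None and isinstance(state, list):
        for i in range(len(state)):
            state[i] = merge_state(state[i], new_state[i])
        return state
    return new_state

current = {"shard_idx": 0, "shard_example_idx": 0, "child": [{"offset": 0}]}
saved = {"shard_idx": 2, "shard_example_idx": 17, "child": [{"offset": 5}]}
inner = current["child"][0]          # keep a reference to a nested dict
merge_state(current, saved)
print(current)                       # {'shard_idx': 2, 'shard_example_idx': 17, 'child': [{'offset': 5}]}
print(inner is current["child"][0])  # True: nested containers are updated in place, not replaced
```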
class ExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
super().__init__()
self.generate_examples_fn = generate_examples_fn
self.kwargs = kwargs
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict | 13 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def __iter__(self):
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None):
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key_example
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator) | 13 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards)
shard_indices = self.split_shard_indices_by_worker(num_shards, index, contiguous=contiguous)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
@property
def num_shards(self) -> int:
return _number_of_shards_in_gen_kwargs(self.kwargs) | 13 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
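Putting the `ExamplesIterable` pieces together, the hypothetical sketch below uses a toy `generate_examples` function and a made-up `filepaths` gen_kwarg. It assumes, as the code above suggests, that list-valued gen_kwargs define the shards, so the shard counts and state values in the comments are illustrative expectations rather than guarantees.

```py
# A hypothetical end-to-end sketch of the internal ExamplesIterable API; it assumes that
# list-valued gen_kwargs such as "filepaths" define the shards.
from itertools import islice
from datasets.iterable_dataset import ExamplesIterable

def generate_examples(filepaths):
    # Toy generator: pretend every "file" holds three examples.
    for path in filepaths:
        for i in range(3):
            yield f"{path}_{i}", {"path": path, "i": i}

ex_iterable = ExamplesIterable(generate_examples, {"filepaths": ["f0", "f1", "f2", "f3"]})
print(ex_iterable.num_shards)  # expected: 4, one shard per file

# Sharding across 2 workers keeps contiguous blocks of files by default.
worker_0 = ex_iterable.shard_data_sources(num_shards=2, index=0)
print([key for key, _ in worker_0])  # expected: keys from f0 and f1 only

# Checkpoint after a few examples, then resume from the saved state.
ex_iterable._init_state_dict()
consumed = [key for key, _ in islice(iter(ex_iterable), 4)]  # f0_0 ... f1_0
checkpoint = ex_iterable.state_dict()  # e.g. {'shard_idx': 1, 'shard_example_idx': 1}

resumed = ExamplesIterable(generate_examples, {"filepaths": ["f0", "f1", "f2", "f3"]})
resumed._init_state_dict()            # the state must be initialized before loading into it
resumed.load_state_dict(checkpoint)
print(next(iter(resumed)))            # expected: ('f1_1', {'path': 'f1', 'i': 1})
```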
class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
def __init__(
self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
):
super().__init__(generate_examples_fn, kwargs)
self.generator = deepcopy(generator)
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict | 14 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None):
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key_example
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0 | 14 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
num_shards, index, contiguous=contiguous
) | 14 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
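The shuffled variant only permutes the order in which the shard-defining gen_kwargs are visited; because the `np.random.Generator` is deep-copied, re-iterating (or calling `shard_data_sources`) reproduces the same permutation, so every worker agrees on the shard assignment. A short hypothetical sketch:

```py
# Hypothetical sketch of shard-order shuffling with a seeded generator
# (assuming list-valued gen_kwargs define the shards).
import numpy as np
from datasets.iterable_dataset import ExamplesIterable

def generate_examples(filepaths):
    for path in filepaths:
        yield path, {"path": path}

ex_iterable = ExamplesIterable(generate_examples, {"filepaths": [f"f{i}" for i in range(5)]})
shuffled = ex_iterable.shuffle_data_sources(np.random.default_rng(42))

# The generator is copied before use, so two passes see the same shuffled shard order.
print([key for key, _ in shuffled])
print([key for key, _ in shuffled])  # same order as the previous line
```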
class ArrowExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
super().__init__()
self.generate_tables_fn = generate_tables_fn
self.kwargs = kwargs
@property
def iter_arrow(self):
return self._iter_arrow
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict | 15 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def __iter__(self):
formatter = PythonFormatter()
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
if shard_example_idx + len(pa_table) <= shard_example_idx_start:
shard_example_idx += len(pa_table)
continue
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
if shard_example_idx >= shard_example_idx_start:
if self._state_dict: | 15 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
self._state_dict["shard_example_idx"] += 1
yield key, example
shard_example_idx += 1
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0 | 15 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def _iter_arrow(self):
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
shard_example_idx += len(pa_table)
if shard_example_idx <= shard_example_idx_start:
continue
if self._state_dict:
self._state_dict["shard_example_idx"] += len(pa_table)
yield key, pa_table
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0 | 15 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ArrowExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards)
shard_indices = self.split_shard_indices_by_worker(num_shards, index, contiguous=contiguous)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
@property
def num_shards(self) -> int:
return _number_of_shards_in_gen_kwargs(self.kwargs) | 15 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
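`ArrowExamplesIterable` wraps a generator of `(key, pyarrow.Table)` pairs and supports two iteration modes: `__iter__` decodes the tables into plain-dict examples batch by batch, while `iter_arrow` hands out the tables themselves for consumers that can work on Arrow data directly. A hypothetical sketch with a toy table generator:

```py
# Hypothetical sketch of the two iteration modes of the internal ArrowExamplesIterable class.
import pyarrow as pa
from datasets.iterable_dataset import ArrowExamplesIterable

def generate_tables(filepaths):
    # Toy generator: one small Arrow table per "file".
    for path in filepaths:
        yield path, pa.table({"path": [path, path], "i": [0, 1]})

ex_iterable = ArrowExamplesIterable(generate_tables, {"filepaths": ["f0", "f1"]})

# Example-level iteration: Arrow tables are decoded into plain dicts.
print(next(iter(ex_iterable)))        # e.g. ('f0', {'path': 'f0', 'i': 0})

# Table-level iteration: whole pyarrow.Table objects are yielded instead.
key, table = next(ex_iterable.iter_arrow())
print(key, table.num_rows)            # e.g. f0 2
```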
class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
def __init__(
self,
generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
kwargs: dict,
generator: np.random.Generator,
):
super().__init__(generate_tables_fn, kwargs)
self.generator = deepcopy(generator)
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict | 16 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
formatter = PythonFormatter()
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
if shard_example_idx + len(pa_table) <= shard_example_idx_start:
shard_example_idx += len(pa_table)
continue
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER): | 16 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
if shard_example_idx >= shard_example_idx_start:
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key, example
shard_example_idx += 1
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0 | 16 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |
def _iter_arrow(self):
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
shard_example_idx += len(pa_table)
if shard_example_idx <= shard_example_idx_start:
continue
if self._state_dict:
self._state_dict["shard_example_idx"] += len(pa_table)
yield key, pa_table
if self._state_dict:
self._state_dict["shard_idx"] += 1 | 16 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py |