
import contextlib
import copy
import json
import os
import posixpath
import re
import warnings
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union

import fsspec
import numpy as np
from huggingface_hub import HfApi

from . import config
from .arrow_dataset import Dataset
from .download import DownloadConfig
from .features import Features
from .features.features import FeatureType
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .info import DatasetInfo, DatasetInfosDict
from .naming import _split_re
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import Table
from .tasks import TaskTemplate
from .utils import logging
from .utils.doc_utils import is_documented_by
from .utils.file_utils import cached_path
from .utils.hub import hf_hub_url
from .utils.metadata import DatasetMetadata
from .utils.typing import PathLike


logger = logging.get_logger(__name__)
    e Zd ZdZd Zd Zdef fdZede	e
ef         fd            Zede	e
e	f         fd            Zede	e
ef         fd            Zede	e
ef         fd	            Zede	e
ee
         f         fd
            Zede	e
ee         f         fd            ZdadbdZde
de	e
ef         fdZde	e
ef         fdZd Zdedd fdZde
dd fdZdee
ee
         f         dd fdZde
de
dd fdZde	e
e
f         dd fdZdee
ee
         f         dd fdZ dcde
de!dd fdZ"e#j$        	 	 	 ddd!e%e
         d"e%e         d#e!fd$            Z&	 	 	 ddd!e%e
         d"e%e         d#e!fd%Z'd& Z(	 	 ded'e%e)         d"e%e         d#e!fd(Z*	 	 	 ddd!e%e
         d"e%e         d#e!dd fd)Z+	 	 ded'e%e)         d"e%e         d#e!dd fd*Z,	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dfd,e%e)         d-e!d.e!d/e%ee
ee
         f                  d0e!d1e%e         d2e!d3e%ee
ee
         f                  d4e!d5e%e!         d6e%e	e
e%e
         f                  d7e%e         de%e         d8e!d9e%e-         d:e%e         d;e%e
         dd f$d<Z.	 	 	 	 	 	 	 	 	 	 	 dgd/e%ee
ee
         f                  d0e!d1e%e         d4e!d5e%e!         d6e%e	e
e%e
         f                  d7e%e         d9e%e-         d:e%e         d;e%e
         dd fd=Z/	 	 	 	 	 	 	 dhdee
e0e
         f         d@ee!e0e!         f         dAe
d4e!d5e%e!         dBe%e	e
e%e
         f                  d7e%e         dd fdCZ1	 	 	 	 	 	 	 didDe%eee	e
e%e         f         f                  dEe%e         dFe%e	e
e2j3        j4        f                  d4e!d5e%e!         dBe%e	e
e%e
         f                  d7e%e         dd fdGZ5	 	 	 	 	 djdHe6dIe%ee
ef                  dJe%e	e
ef                  d:e%e         dKe%e-         f
dLZ7e8	 	 	 dkdHe6d4e%e!         dKe%e-         dd fdM            Z9e8	 	 	 dddNe	e
e6f         de%e         dOe
d4e!dd f
dP            Z:e8	 	 	 dddNe	e
e6f         de%e         dOe
d4e!dd f
dQ            Z;e8	 	 	 	 dldNe	e
e6f         de%e         dOe
d4e!d"e%ee
                  dd fdR            Z<e8	 	 	 dddNe	e
e6f         de%e         dOe
d4e!dd f
dS            Z= e>ej?                  dmdUee
e@f         dVedd fdW            Z? e>ejA                  dXe	dYe
dd fdZ            ZA	 	 	 	 	 	 dnd\e%e!         d]e%e
         d^e%d          dIe%eee
f                  dJe%e	e
ef                  d_e!fd`ZB xZCS )oDatasetDictz`A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)c                     |                                  D ]7}t          |t                    s t          dt	          |           d          8d S )NzBValues in `DatasetDict` should be of type `Dataset` but got type '')values

    def _check_values_features(self):
        items = list(self.items())
        for item_a, item_b in zip(items[:-1], items[1:]):
            if item_a[1].features != item_b[1].features:
                raise ValueError(
                    f"All datasets in `DatasetDict` should have the same features but features for "
                    f"'{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
                )

    def __getitem__(self, k) -> Dataset:
        if isinstance(k, (str, NamedSplit)) or len(self) == 0:
            return super().__getitem__(k)
        else:
            available_suggested_splits = [
                str(split) for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
            ]
            suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
            raise KeyError(
                f"Invalid key: {k}. Please first select a split. For example: "
                f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
                f"Available splits: {sorted(self)}"
            )

    @property
    def data(self) -> Dict[str, Table]:
        """The Apache Arrow tables backing each split.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.data
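        >>> # Illustrative follow-up (assuming the "rotten_tomatoes" splits above): each value is the
        >>> # pyarrow-backed table of one split, so per-split row counts can be read off the tables.
        >>> {split: table.num_rows for split, table in ds.data.items()}
        {'test': 1066, 'train': 8530, 'validation': 1066}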
        ```
        """
        self._check_values_type()
        return {k: dataset.data for k, dataset in self.items()}

    @property
    def cache_files(self) -> Dict[str, Dict]:
        """The cache files containing the Apache Arrow table backing each split.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.cache_files
        {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
         'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
         'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
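        >>> # Illustrative follow-up: each split is backed by a single Arrow file here, so counting
        >>> # the cached files per split is straightforward.
        >>> {split: len(files) for split, files in ds.cache_files.items()}
        {'test': 1, 'train': 1, 'validation': 1}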
        ```
        """
        self._check_values_type()
        return {k: dataset.cache_files for k, dataset in self.items()}

    @property
    def num_columns(self) -> Dict[str, int]:
        """Number of columns in each split of the dataset.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.num_columns
        {'test': 2, 'train': 2, 'validation': 2}
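        >>> # Illustrative follow-up: the same counts can be derived from `column_names`.
        >>> {split: len(names) for split, names in ds.column_names.items()}
        {'test': 2, 'train': 2, 'validation': 2}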
        ```
        """
        self._check_values_type()
        return {k: dataset.num_columns for k, dataset in self.items()}

    @property
    def num_rows(self) -> Dict[str, int]:
        """Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`).

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.num_rows
        {'test': 1066, 'train': 8530, 'validation': 1066}
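        >>> # Illustrative follow-up: summing the per-split counts gives the total number of examples.
        >>> sum(ds.num_rows.values())
        10662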
        ```
        """
        self._check_values_type()
        return {k: dataset.num_rows for k, dataset in self.items()}

    @property
    def column_names(self) -> Dict[str, List[str]]:
        """Names of the columns in each split of the dataset.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.column_names
        {'test': ['text', 'label'],
         'train': ['text', 'label'],
         'validation': ['text', 'label']}
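        >>> # Illustrative follow-up: check that a column exists in every split before operating on it.
        >>> all("label" in names for names in ds.column_names.values())
        True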
        ```
        """
        self._check_values_type()
        return {k: dataset.column_names for k, dataset in self.items()}

    @property
    def shape(self) -> Dict[str, Tuple[int]]:
        """Shape of each split of the dataset (number of rows, number of columns).

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.shape
        {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
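        >>> # Illustrative follow-up: each tuple is (num_rows, num_columns) for that split.
        >>> ds.shape["train"] == (ds.num_rows["train"], ds.num_columns["train"])
        True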
        ```
        """
        self._check_values_type()
        return {k: dataset.shape for k, dataset in self.items()}

    def flatten(self, max_depth=16) -> "DatasetDict":
        """Flatten the Apache Arrow Table of each split (nested features are flattened).
        Each column with a struct type is flattened into one column per struct field.
        Other columns are left unchanged.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("squad")
        >>> ds["train"].features
        {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
         'context': Value(dtype='string', id=None),
         'id': Value(dtype='string', id=None),
         'question': Value(dtype='string', id=None),
         'title': Value(dtype='string', id=None)}
        >>> ds.flatten()
        DatasetDict({
            train: Dataset({
                features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
                num_rows: 87599
            })
            validation: Dataset({
                features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
                num_rows: 10570
            })
        })
        ```
        """
        self._check_values_type()
        return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})

    def unique(self, column: str) -> Dict[str, List]:
        """Return a list of the unique elements in a column for each split.

        This is implemented in the low-level backend and as such, very fast.

        Args:
            column (`str`):
                column name (list all the column names with [`~datasets.Dataset.column_names`])

        Returns:
            Dict[`str`, `list`]: Dictionary of unique elements in the given column.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.unique("label")
        {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
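        >>> # Illustrative follow-up: the number of distinct values per split for that column.
        >>> {split: len(values) for split, values in ds.unique("label").items()}
        {'test': 2, 'train': 2, 'validation': 2}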
        ```
        """
        self._check_values_type()
        return {k: dataset.unique(column) for k, dataset in self.items()}

    def cleanup_cache_files(self) -> Dict[str, int]:
        """Clean up all cache files in the dataset cache directory, except the currently used one if there is one.
        Be careful when running this command that no other process is currently using other cache files.

        Return:
            `Dict` with the number of removed files for each split

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.cleanup_cache_files()
        {'test': 0, 'train': 0, 'validation': 0}
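        >>> # Illustrative follow-up: a second call finds nothing further to remove, so the total is 0.
        >>> sum(ds.cleanup_cache_files().values())
        0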
        ```
        """
        self._check_values_type()
        return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}

    def __repr__(self):
        repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
        repr = re.sub(r"^", " " * 4, repr, 0, re.M)
        return f"DatasetDict({{\n{repr}\n}})"

    def cast(self, features: Features) -> "DatasetDict":
        """
        Cast the dataset to a new set of features.
        The transformation is applied to all the datasets of the dataset dictionary.

        You can also remove a column using [`Dataset.map`] with `feature` but `cast`
        is in-place (doesn't copy the data to a new dataset) and is thus faster.

        Args:
            features ([`Features`]):
                New features to cast the dataset to.
                The name and order of the fields in the features must match the current column names.
                The type of the data must also be convertible from one type to the other.
                For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        >>> new_features = ds["train"].features.copy()
        >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
        >>> new_features['text'] = Value('large_string')
        >>> ds = ds.cast(new_features)
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
         'text': Value(dtype='large_string', id=None)}
        ```
        c                 D    i | ]\  }}||                                S )r7   castr@   rL   r.   r7   s      r/   rT   z$DatasetDict.cast.<locals>.<dictcomp>  s-    ^^^:1gAw||X|>>^^^r1   rm   r-   r7   s    `r/   r   zDatasetDict.cast   sH    @ 	!!!^^^^QUQ[Q[Q]Q]^^^___r1   c                     |                                   t          fd|                                 D                       S )a  Cast column to feature for decoding.

        Args:
            column (`str`):
                Column name.
            feature ([`Feature`]):
                Target feature.

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
         'text': Value(dtype='string', id=None)}
        ```
        c                 F    i | ]\  }}||                                S )rn   featurecast_columnr@   rL   r.   rn   r   s      r/   rT   z+DatasetDict.cast_column.<locals>.<dictcomp>6  s5    rrrWaWXZaAw22&'2RRrrrr1   rm   r-   rn   r   s    ``r/   r   zDatasetDict.cast_column  sK    4 	!!!rrrrreieoeoeqeqrrrsssr1   rd   c                     |                                   t          fd|                                 D                       S )a-  
        Remove one or several column(s) from each split in the dataset
        and the features associated to the column(s).

        The transformation is applied to all the splits of the dataset dictionary.

        You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method
        is in-place (doesn't copy the data to a new dataset) and is thus faster.

        Args:
            column_names (`Union[str, List[str]]`):
                Name of the column(s) to remove.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.remove_columns("label")
        DatasetDict({
            train: Dataset({
                features: ['text'],
                num_rows: 8530
            })
            validation: Dataset({
                features: ['text'],
                num_rows: 1066
            })
            test: Dataset({
                features: ['text'],
                num_rows: 1066
            })
        })
        ```
        c                 D    i | ]\  }}||                                S rc   remove_columnsr@   rL   r.   rd   s      r/   rT   z.DatasetDict.remove_columns.<locals>.<dictcomp>]  3    pppU_UVX_Aw55<5PPpppr1   rm   r-   rd   s    `r/   r   zDatasetDict.remove_columns8  sH    H 	!!!ppppcgcmcmcocopppqqqr1   original_column_namenew_column_namec                     |                                   t          fd|                                 D                       S )a  
        Rename a column in the dataset and move the features associated to the original column under the new column name.
        The transformation is applied to all the datasets of the dataset dictionary.

        You can also rename a column using [`~Dataset.map`] with `remove_columns` but the present method:
            - takes care of moving the original features under the new column name.
            - doesn't copy the data to a new dataset and is thus much faster.

        Args:
            original_column_name (`str`):
                Name of the column to rename.
            new_column_name (`str`):
                New name for the column.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.rename_column("label", "label_new")
        DatasetDict({
            train: Dataset({
                features: ['text', 'label_new'],
                num_rows: 8530
            })
            validation: Dataset({
                features: ['text', 'label_new'],
                num_rows: 1066
            })
            test: Dataset({
                features: ['text', 'label_new'],
                num_rows: 1066
            })
        })
        ```
        c                 F    i | ]\  }}||                                S )r   r   rename_columnr@   rL   r.   r   r   s      r/   rT   z-DatasetDict.rename_column.<locals>.<dictcomp>  D       Aw 7((>Rds(tt  r1   rm   r-   r   r   s    ``r/   r   zDatasetDict.rename_column_  s^    J 	!!!    "&**,,  
 
 	
r1   column_mappingc                     |                                   t          fd|                                 D                       S )aA  
        Rename several columns in the dataset, and move the features associated to the original columns under
        the new column names.
        The transformation is applied to all the datasets of the dataset dictionary.

        Args:
            column_mapping (`Dict[str, str]`):
                A mapping of columns to rename to their new names.

        Returns:
            [`DatasetDict`]: A copy of the dataset with renamed columns.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
        DatasetDict({
            train: Dataset({
                features: ['text_new', 'label_new'],
                num_rows: 8530
            })
            validation: Dataset({
                features: ['text_new', 'label_new'],
                num_rows: 1066
            })
            test: Dataset({
                features: ['text_new', 'label_new'],
                num_rows: 1066
            })
        })
        ```
        c                 D    i | ]\  }}||                                S )r   rename_columnsr@   rL   r.   r   s      r/   rT   z.DatasetDict.rename_columns.<locals>.<dictcomp>  s3    tttYcYZ\cAw55^5TTtttr1   rm   r-   r   s    `r/   r   zDatasetDict.rename_columns  sH    F 	!!!ttttgkgqgqgsgstttuuur1   c                     |                                   t          fd|                                 D                       S )ao  Select one or several column(s) from each split in the dataset and
        the features associated to the column(s).

        The transformation is applied to all the splits of the dataset
        dictionary.

        Args:
            column_names (`Union[str, List[str]]`):
                Name of the column(s) to keep.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.select_columns("text")
        DatasetDict({
            train: Dataset({
                features: ['text'],
                num_rows: 8530
            })
            validation: Dataset({
                features: ['text'],
                num_rows: 1066
            })
            test: Dataset({
                features: ['text'],
                num_rows: 1066
            })
        })
        ```
        c                 D    i | ]\  }}||                                S r   select_columnsr   s      r/   rT   z.DatasetDict.select_columns.<locals>.<dictcomp>  r   r1   rm   r   s    `r/   r   zDatasetDict.select_columns  sH    B 	!!!ppppcgcmcmcocopppqqqr1   Finclude_nullsc                     |                                   t          fd|                                 D                       S )a  Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.

        Args:
            column (`str`):
                The name of the column to cast.
            include_nulls (`bool`, defaults to `False`):
                Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.

                <Added version="1.14.2"/>

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("boolq")
        >>> ds["train"].features
        {'answer': Value(dtype='bool', id=None),
         'passage': Value(dtype='string', id=None),
         'question': Value(dtype='string', id=None)}
        >>> ds = ds.class_encode_column("answer")
        >>> ds["train"].features
        {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
         'passage': Value(dtype='string', id=None),
         'question': Value(dtype='string', id=None)}
        ```
        c                 F    i | ]\  }}||                                S ))rn   r   )class_encode_column)r@   rL   r.   rn   r   s      r/   rT   z3DatasetDict.class_encode_column.<locals>.<dictcomp>  s5    www\f\]_fQ++6+WWwwwr1   rm   )r-   rn   r   s    ``r/   r   zDatasetDict.class_encode_column  sQ    6 	!!!wwwwwjnjtjtjvjvwww
 
 	
r1   Nr+   columnsoutput_all_columnsc           	   +   ^  K   |                                   d |                                 D             }d |                                 D             }d |                                 D             }d |                                 D             }	  | j        |||fi | dV  |                                 D ]-\  }	}
 |
j        ||	         ||	         ||	         fi ||	          .dS # |                                 D ]-\  }	}
 |
j        ||	         ||	         ||	         fi ||	          .w xY w)a  To be used in a `with` statement. Set `__getitem__` return format (type and columns).
        The transformation is applied to all the datasets of the dataset dictionary.

        Args:
            type (`str`, *optional*):
                Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
                `None` means `__getitem__` returns python objects (default).
            columns (`List[str]`, *optional*):
                Columns to format in the output.
                `None` means `__getitem__` returns all columns (default).
            output_all_columns (`bool`, defaults to False):
                Keep un-formatted columns as well in the output (as python objects).
            **format_kwargs (additional keyword arguments):
                Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
        c                 $    i | ]\  }}||j         S r?   )_format_typerS   s      r/   rT   z,DatasetDict.formatted_as.<locals>.<dictcomp>  s!    RRRzq'1g2RRRr1   c                 $    i | ]\  }}||j         S r?   )_format_kwargsrS   s      r/   rT   z,DatasetDict.formatted_as.<locals>.<dictcomp>  s!    VVV:1gQ 6VVVr1   c                 $    i | ]\  }}||j         S r?   )_format_columnsrS   s      r/   rT   z,DatasetDict.formatted_as.<locals>.<dictcomp>  s!    XXXZQa!8XXXr1   c                 $    i | ]\  }}||j         S r?   )_output_all_columnsrS   s      r/   rT   z,DatasetDict.formatted_as.<locals>.<dictcomp>  s!    !`!`!`ZQ!W%@!`!`!`r1   N)r0   r5   
set_format)r-   r+   r   r   format_kwargsold_format_typeold_format_kwargsold_format_columnsold_output_all_columnsrL   r.   s              r/   formatted_aszDatasetDict.formatted_as  s     . 	!!!RRTZZ\\RRRVVVVVXX4::<<XXX!`!`SWS]S]S_S_!`!`!`	DOD'+=OOOOOEEE"jjll  
7""#A&(:1(=?UVW?X \mno\p    djjll  
7""#A&(:1(=?UVW?X \mno\p   s   C( (AD,c                 ~    |                                   |                                 D ]} |j        d|||d| dS )a9  Set `__getitem__` return format (type and columns).
        The format is set for every dataset in the dataset dictionary.

        Args:
            type (`str`, *optional*):
                Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
                `None` means `__getitem__` returns python objects (default).
            columns (`List[str]`, *optional*):
                Columns to format in the output.
                `None` means `__getitem__` returns all columns (default).
            output_all_columns (`bool`, defaults to False):
                Keep un-formatted columns as well in the output (as python objects),
            **format_kwargs (additional keyword arguments):
                Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.

        It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns
        gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:

            `new formatted columns = (all columns - previously unformatted columns)`

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
        >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
        >>> ds["train"].format
        {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': 'numpy'}
        ```
        r+   r   r   Nr?   r0   r(   r   r-   r+   r   r   r   r.   s         r/   r   zDatasetDict.set_format  sc    T 	!!!{{}} 	s 	sGGrD'N`rrdqrrrr	s 	sr1   c                     |                                   |                                 D ]}|                                 dS )a]  Reset `__getitem__` return format to python objects and all columns.
        The transformation is applied to all the datasets of the dataset dictionary.

        Same as `self.set_format()`

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
        >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
        >>> ds["train"].format
        {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': 'numpy'}
        >>> ds.reset_format()
        >>> ds["train"].format
        {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': None}
        ```
        Nr   r,   s     r/   reset_formatzDatasetDict.reset_formatI  sL    8 	!!!{{}} 	! 	!G    	! 	!r1   	transformc                     |                                   |                                 D ]}|                    d|||           dS )aW  Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
        The transform is set for every dataset in the dataset dictionary
        As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`

        Args:
            transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
                A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
                This function is applied right before returning the objects in ``__getitem__``.
            columns (`List[str]`, optional): columns to format in the output
                If specified, then the input batch of the transform only contains those columns.
            output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
                If set to True, then the other un-formatted columns are kept with the output of the transform.

        custom)r   r   r   Nr   r-   r   r   r   r.   s        r/   set_transformzDatasetDict.set_transformi  s]    ( 	!!!{{}} 	v 	vGxM_ktuuuu	v 	vr1   c                 P    t          j        |           } |j        d|||d| |S )a  Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
        The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
        The format is set for every dataset in the dataset dictionary.

        It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].

        Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.

        Args:
            type (`str`, *optional*):
                Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
                `None` means `__getitem__` returns python objects (default).
            columns (`List[str]`, *optional*):
                Columns to format in the output.
                `None` means `__getitem__` returns all columns (default).
            output_all_columns (`bool`, defaults to `False`):
                Keep un-formatted columns as well in the output (as python objects).
            **format_kwargs (additional keyword arguments):
                Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
        >>> ds["train"].format
        {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': None}
        >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
        >>> ds["train"].format
        {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': 'tensorflow'}
        ```
        r   r?   )copydeepcopyr   r   s         r/   with_formatzDatasetDict.with_format  s=    ` -%%ngJ\nn`mnnnr1   c                 ^    t          j        |           }|                    |||           |S )a
  Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
        The transform is set for every dataset in the dataset dictionary

        As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].

        Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.

        Args:
            transform (`Callable`, *optional*):
                User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
                A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
                This function is applied right before returning the objects in `__getitem__`.
            columns (`List[str]`, *optional*):
                Columns to format in the output.
                If specified, then the input batch of the transform only contains those columns.
            output_all_columns (`bool`, defaults to False):
                Keep un-formatted columns as well in the output (as python objects).
                If set to `True`, then the other un-formatted columns are kept with the output of the transform.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> def encode(example):
        ...     return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
        >>> ds = ds.with_transform(encode)
        >>> ds["train"][0]
        {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1]),
         'input_ids': tensor([  101,  1103,  2067,  1110, 17348,  1106,  1129,  1103,  6880,  1432,
                112,   188,  1207,   107, 14255,  1389,   107,  1105,  1115,  1119,
                112,   188,  1280,  1106,  1294,   170, 24194,  1256,  3407,  1190,
                170, 11791,  5253,   188,  1732,  7200, 10947, 12606,  2895,   117,
                179,  7766,   118,   172, 15554,  1181,  3498,  6961,  3263,  1137,
                188,  1566,  7912, 14516,  6997,   119,   102]),
         'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0])}
        ```
        )r   r   r   )r   r   r   r   s        r/   with_transformzDatasetDict.with_transform  s5    d -%%	7Wijjjr1     functionwith_indices	with_rankinput_columnsbatched
batch_sizedrop_last_batchr   keep_in_memoryload_from_cache_filecache_file_nameswriter_batch_sizedisable_nullable	fn_kwargsnum_procdescc                    	
 |                                   d | D             t          	
fd|                                 D                       S )a  Apply a function to all the elements in the table (individually or in batches)
        and update the table (if function does updated examples).
        The transformation is applied to all the datasets of the dataset dictionary.

        Args:
            function (`callable`): with one of the following signature:
                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
                - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`

                For advanced usage, the function can also return a `pyarrow.Table`.
                Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.

            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
            with_rank (`bool`, defaults to `False`):
                Provide process rank to `function`. Note that in this case the
                signature of `function` should be `def function(example[, idx], rank): ...`.
            input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
                The columns to be passed into `function` as
                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`,
                `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`.
            drop_last_batch (`bool`, defaults to `False`):
                Whether a last batch smaller than the batch_size should be
                dropped instead of being processed by the function.
            remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
                Remove a selection of columns while doing the mapping.
                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
                columns with names in `remove_columns`, these columns will be kept.
            keep_in_memory (`bool`, defaults to `False`):
                Keep the dataset in memory instead of writing it to a cache file.
            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
                If a cache file storing the current computation from `function`
                can be identified, use it instead of recomputing.
            cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
                Provide the name of a path for the cache file. It is used to store the
                results of the computation instead of the automatically generated cache file name.
                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
            writer_batch_size (`int`, default `1000`):
                Number of rows per write operation for the cache file writer.
                This value is a good trade-off between memory usage during the processing, and processing speed.
                Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`.
            features (`[datasets.Features]`, *optional*, defaults to `None`):
                Use a specific [`Features`] to store the cache file
                instead of the automatically generated one.
            disable_nullable (`bool`, defaults to `False`):
                Disallow null values in the table.
            fn_kwargs (`Dict`, *optional*, defaults to `None`):
                Keyword arguments to be passed to `function`
            num_proc (`int`, *optional*, defaults to `None`):
                Number of processes for multiprocessing. By default it doesn't
                use multiprocessing.
            desc (`str`, *optional*, defaults to `None`):
                Meaningful description to be displayed alongside with the progress bar while mapping examples.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> def add_prefix(example):
        ...     example["text"] = "Review: " + example["text"]
        ...     return example
        >>> ds = ds.map(add_prefix)
        >>> ds["train"][0:3]["text"]
        ['Review: the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
         'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson's expanded vision of j . r . r . tolkien's middle-earth .',
         'Review: effective but too-tepid biopic']

        # process a batch of examples
        >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
        # set number of processors
        >>> ds = ds.map(add_prefix, num_proc=4)
        ```
        Nc                     i | ]}|d S Nr?   r@   rL   s     r/   rT   z#DatasetDict.map.<locals>.<dictcomp>Q      666A4666r1   c                 p    i | ]2\  }}||                     |         	
           3S ))r   r   r   r   r   r   r   r   r   r   cache_file_namer   r7   r   r   r   r   map)r@   rL   r.   r   r   r   r   r   r   r7   r   r   r   r   r   r   r   r   r   r   s      r/   rT   z#DatasetDict.map.<locals>.<dictcomp>S  s|       ( Aw' 7;;%!-'"/#)$3#1#1)=$4Q$7&7%%5'%#     r1   rm   )r-   r   r   r   r   r   r   r   r   r   r   r   r   r7   r   r   r   r   s    `````````````````r/   r   zDatasetDict.map  s    H 	!!!#66666                   ( #'**,,)  
 
 	
r1   c                    	
 |                                   d | D             t          
	fd|                                 D                       S )a  Apply a filter function to all the elements in the table in batches
        and update the table so that the dataset only includes examples according to the filter function.
        The transformation is applied to all the datasets of the dataset dictionary.

        Args:
            function (`callable`):
                With one of the following signature:
                - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
                - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
                - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
                - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if ``with_indices=True, batched=True`
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
            input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
                The columns to be passed into `function` as
                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`
                `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`.
            keep_in_memory (`bool`, defaults to `False`):
                Keep the dataset in memory instead of writing it to a cache file.
            load_from_cache_file (`Optional[bool]`, defaults to `True` if chaching is enabled):
                If a cache file storing the current computation from `function`
                can be identified, use it instead of recomputing.
            cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
                Provide the name of a path for the cache file. It is used to store the
                results of the computation instead of the automatically generated cache file name.
                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
            writer_batch_size (`int`, defaults to `1000`):
                Number of rows per write operation for the cache file writer.
                This value is a good trade-off between memory usage during the processing, and processing speed.
                Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`.
            fn_kwargs (`Dict`, *optional*, defaults to `None`):
                Keyword arguments to be passed to `function`
            num_proc (`int`, *optional*, defaults to `None`):
                Number of processes for multiprocessing. By default it doesn't
                use multiprocessing.
            desc (`str`, *optional*, defaults to `None`):
                Meaningful description to be displayed alongside with the progress bar while filtering examples.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds.filter(lambda x: x["label"] == 1)
        DatasetDict({
            train: Dataset({
                features: ['text', 'label'],
                num_rows: 4265
            })
            validation: Dataset({
                features: ['text', 'label'],
                num_rows: 533
            })
            test: Dataset({
                features: ['text', 'label'],
                num_rows: 533
            })
        })
        ```
        Nc                     i | ]}|d S r   r?   r   s     r/   rT   z&DatasetDict.filter.<locals>.<dictcomp>  r   r1   c                 f    i | ]-\  }}||                     	
|                    .S ))r   r   r   r   r   r   r   r   r   r   r   r   filter)r@   rL   r.   r   r   r   r   r   r   r   r   r   r   r   r   s      r/   rT   z&DatasetDict.filter.<locals>.<dictcomp>  sm        Aw 7>>%!-"/#)#1)=$4Q$7&7'% "    r1   rm   )r-   r   r   r   r   r   r   r   r   r   r   r   r   s    ````````````r/   r   zDatasetDict.filterk  s    ^ 	!!!#66666               #'**,,  
 
 	
r1   
deprecatedat_endreversenull_placementindices_cache_file_namesc	           
          |                                   d | D             t          fd|                                 D                       S )a
  Create a new dataset sorted according to a single or multiple columns.

        Args:
            column_names (`Union[str, Sequence[str]]`):
                Column name(s) to sort by.
            reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
                If `True`, sort by descending order rather than ascending. If a single bool is provided,
                the value is applied to the sorting of all column names. Otherwise a list of bools with the
                same length and order as column_names must be provided.
            kind (`str`, *optional*):
                Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
                The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
                the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
                <Deprecated version="2.8.0">

                `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.

                </Deprecated>
            null_placement (`str`, defaults to `at_end`):
                Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
            keep_in_memory (`bool`, defaults to `False`):
                Keep the sorted indices in memory instead of writing it to a cache file.
            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
                If a cache file storing the sorted indices
                can be identified, use it instead of recomputing.
            indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
                Provide the name of a path for the cache file. It is used to store the
                indices mapping instead of the automatically generated cache file name.
                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
            writer_batch_size (`int`, defaults to `1000`):
                Number of rows per write operation for the cache file writer.
                Higher value gives smaller cache files, lower value consume less temporary memory.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset('rotten_tomatoes')
        >>> ds['train']['label'][:10]
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> sorted_ds = ds.sort('label')
        >>> sorted_ds['train']['label'][:10]
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
        >>> another_sorted_ds['train']['label'][:10]
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        ```
        Nc                     i | ]}|d S r   r?   r   s     r/   rT   z$DatasetDict.sort.<locals>.<dictcomp>      '>'>'>A4'>'>'>r1   c                 ^    i | ])\  }}||                     	|         
           *S ))rd   r   kindr   r   r   indices_cache_file_namer   )sort)r@   rL   r.   rd   r   r   r   r   r   r   r   s      r/   rT   z$DatasetDict.sort.<locals>.<dictcomp>  sa        Aw 7<<!-##1#1)=,DQ,G&7   	 	  r1   rm   )	r-   rd   r   r   r   r   r   r   r   s	    ````````r/   r   zDatasetDict.sort  s    v 	!!!#+'>'>'>'>'>$           #'**,,  
 
 	
r1   seedsseed
generatorsc                 h   |                                   |t          d          ||nd | D             n#t          t                    sfd| D             d | D             d | D             t	          fd|                                 D                       S )a  Create a new Dataset where the rows are shuffled.

        The transformation is applied to all the datasets of the dataset dictionary.

        Currently shuffling uses numpy random generators.
        You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).

        Args:
            seeds (`Dict[str, int]` or `int`, *optional*):
                A seed to initialize the default BitGenerator if `generator=None`.
                If `None`, then fresh, unpredictable entropy will be pulled from the OS.
                If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
                You can provide one `seed` per dataset in the dataset dictionary.
            seed (`int`, *optional*):
                A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
            generators (`Dict[str, *optional*, np.random.Generator]`):
                Numpy random Generator to use to compute the permutation of the dataset rows.
                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
                You have to provide one `generator` per dataset in the dataset dictionary.
            keep_in_memory (`bool`, defaults to `False`):
                Keep the dataset in memory instead of writing it to a cache file.
            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
                If a cache file storing the current computation from `function`
                can be identified, use it instead of recomputing.
            indices_cache_file_names (`Dict[str, str]`, *optional*):
                Provide the name of a path for the cache file. It is used to store the
                indices mappings instead of the automatically generated cache file name.
                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
            writer_batch_size (`int`, defaults to `1000`):
                Number of rows per write operation for the cache file writer.
                This value is a good trade-off between memory usage during the processing and processing speed.
                A higher value makes the processing do fewer lookups; a lower value consumes less temporary memory while running `map`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds["train"]["label"][:10]
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

        # set a seed
        >>> shuffled_ds = ds.shuffle(seed=42)
        >>> shuffled_ds["train"]["label"][:10]
        [0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
        ```
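
        One seed per split can also be provided (here the split names of `rotten_tomatoes`):

        ```py
        >>> shuffled_ds = ds.shuffle(seeds={"train": 42, "validation": 42, "test": 42})
        ```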
        Nz*Please specify seed or seeds, but not bothc                     i | ]}|d S r   r?   r   s     r/   rT   z'DatasetDict.shuffle.<locals>.<dictcomp>]  s    +++Q+++r1   c                     i | ]}|S r?   r?   )r@   rL   r   s     r/   rT   z'DatasetDict.shuffle.<locals>.<dictcomp>_  s    ,,,!Q,,,r1   c                     i | ]}|d S r   r?   r   s     r/   rT   z'DatasetDict.shuffle.<locals>.<dictcomp>a      000a!T000r1   c                     i | ]}|d S r   r?   r   s     r/   rT   z'DatasetDict.shuffle.<locals>.<dictcomp>c  r   r1   c                 r    i | ]3\  }}||                     |         |         |                    4S ))r   	generatorr   r   r   r   shuffle)	r@   rL   r.   r   r   r   r   r   r   s	      r/   rT   z'DatasetDict.shuffle.<locals>.<dictcomp>e  sc     
 
 
 Aw 7??q(m#1)=,DQ,G&7 #  
 
 
r1   )r0   r8   r)   dictr%   r5   )r-   r   r   r   r   r   r   r   s    ` `````r/   r
  zDatasetDict.shuffle  s   r 	!!! 1IJJJ(e=++d+++EEE4(( 	-,,,,t,,,E004000J#+'>'>'>'>'>$
 
 
 
 
 
 
 
 
 #'**,,
 
 

 
 	
r1   dataset_dict_pathmax_shard_size
num_shardsstorage_optionsc                 f   |dk    r!t          j        dt                     |j        }t	          j        ||          }|d         }t          |           }|rt          j        j	        nt          j	        }	|d | D             }n$t          |t                    st          d          |r7t          |                                                              dd	           n|                    |d
           |                     |	|t&          j                  dd          5 }
t+          j        dt/          |           i|
           ddd           n# 1 swxY w Y   |                                 D ]<\  }}|                     |	||          |                    |          |||           =dS )aU	  
        Saves a dataset dict to a filesystem using either [`~filesystems.S3FileSystem`] or
        `fsspec.spec.AbstractFileSystem`.

        For [`Image`] and [`Audio`] data:

        All the Image() and Audio() data are stored in the arrow files.
        If you want to store paths or urls, please use the Value("string") type.

        Args:
            dataset_dict_path (`str`):
                Path (e.g. `dataset/train`) or remote URI
                (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
                saved to.
            fs (`fsspec.spec.AbstractFileSystem`, *optional*):
                Instance of the remote filesystem where the dataset will be saved to.

                <Deprecated version="2.8.0">

                `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`

                </Deprecated>

            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
                (like `"50MB"`).
            num_shards (`Dict[str, int]`, *optional*):
                Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
                You need to provide the number of shards for each dataset in the dataset dictionary.
                Use a dictionary to define a different num_shards for each split.

                <Added version="2.8.0"/>
            num_proc (`int`, *optional*, default `None`):
                Number of processes when downloading and generating the dataset locally.
                Multiprocessing is disabled by default.

                <Added version="2.8.0"/>
            storage_options (`dict`, *optional*):
                Key/value pairs to be passed on to the file-system backend, if any.

                <Added version="2.8.0"/>

        Example:

        ```python
        >>> dataset_dict.save_to_disk("path/to/dataset/directory")
        >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
        >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
        ```
        """
        if fs != "deprecated":
            warnings.warn(
                "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
                "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
                FutureWarning,
            )
            storage_options = fs.storage_options

        fs_token_paths = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
        fs: fsspec.AbstractFileSystem = fs_token_paths[0]
        is_local = not is_remote_filesystem(fs)
        path_join = os.path.join if is_local else posixpath.join

        if num_shards is None:
            num_shards = {k: None for k in self}
        elif not isinstance(num_shards, dict):
            raise ValueError(
                "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {'train': 128, 'test': 4}"
            )

        if is_local:
            Path(dataset_dict_path).resolve().mkdir(parents=True, exist_ok=True)
        else:
            fs.makedirs(dataset_dict_path, exist_ok=True)

        # write the list of splits, then delegate each split to Dataset.save_to_disk
        with fs.open(
            path_join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8"
        ) as f:
            json.dump({"splits": list(self)}, f)
        for k, dataset in self.items():
            dataset.save_to_disk(
                path_join(dataset_dict_path, k),
                num_shards=num_shards.get(k),
                max_shard_size=max_shard_size,
                num_proc=num_proc,
                storage_options=storage_options,
            )

    @staticmethod
    def load_from_disk(
        dataset_dict_path: PathLike,
        fs="deprecated",
        keep_in_memory: Optional[bool] = None,
        storage_options: Optional[dict] = None,
    ) -> "DatasetDict":
        """
        Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using either
        [`~filesystems.S3FileSystem`] or `fsspec.spec.AbstractFileSystem`.

        Args:
            dataset_dict_path (`str`):
                Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
                of the dataset dict directory where the dataset dict will be loaded from.
            fs (`fsspec.spec.AbstractFileSystem`, *optional*):
                Instance of the remote filesystem from which the dataset dict will be loaded.

                <Deprecated version="2.8.0">

                `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`

                </Deprecated>

            keep_in_memory (`bool`, defaults to `None`):
                Whether to copy the dataset in-memory. If `None`, the
                dataset will not be copied in-memory unless explicitly enabled by setting
                `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
                [improve performance](../cache#improve-performance) section.
            storage_options (`dict`, *optional*):
                Key/value pairs to be passed on to the file-system backend, if any.

                <Added version="2.8.0"/>

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> ds = load_from_disk('path/to/dataset/directory')
        ```
        r   r  r  r   filezNo such file: 'z'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead.zU'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`.rr  r  r  Nz://)r   r  )r  r  r  r  r  r   r   r   r#  rz   
filesystemr!  r"  r   r(  DATASET_STATE_JSON_FILENAMEDATASET_INFO_FILENAMEisfileFileNotFoundErrorr'  r)  loadr%   rA   r   load_from_disk)r  r-  r   r  r.  dest_dataset_dict_pathr0  dataset_dict_json_pathdataset_state_json_pathdataset_info_pathr1  r  dataset_dictrL   dataset_dict_split_paths                  r/   r;  zDatasetDict.load_from_disk  s   X Mg  
 !0O23DVefff(6q(9## 	%%:;L%M%M"!II"6**B%6"I!*+A6Cc!d!d"+),BFDf"g"g%I&<f>Z[[yy/00 	yy*++ 		:Q0R0R ' P&<  P  P  P   $ P"8  P  P  P   WW+S7WCC 	,qYq\\(+F	, 	, 	, 	, 	, 	, 	, 	, 	, 	, 	, 	, 	, 	, 	, #}} 	 	A (++:!''..q1E9IIF\^_<`<```Y5q99 $
 &4'Xg  LOO s   E>>FFpath_or_paths	cache_dirc                 L    ddl m}  || f|||d|                                S )a+  Create [`DatasetDict`] from CSV file(s).

        Args:
            path_or_paths (`dict` of path-like):
                Path(s) of the CSV file(s).
            features ([`Features`], *optional*):
                Dataset features.
            cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
                Directory to cache data.
            keep_in_memory (`bool`, defaults to `False`):
                Whether to copy the data in-memory.
            **kwargs (additional keyword arguments):
                Keyword arguments to be passed to [`pandas.read_csv`].

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> from datasets import DatasetDict
        >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
        ```
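
        Several splits can be loaded at once (the paths below are placeholders):

        ```py
        >>> ds = DatasetDict.from_csv({'train': 'path/to/train.csv', 'test': 'path/to/test.csv'})
        ```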
        r   )CsvDatasetReaderr7   rC  r   )io.csvrE  read)rB  r7   rC  r   kwargsrE  s         r/   from_csvzDatasetDict.from_csv,  sR    B 	-,,,,,
$,	R`
 
dj
 

$&&	r1   c                 L    ddl m}  || f|||d|                                S )aE  Create [`DatasetDict`] from JSON Lines file(s).

        Args:
            path_or_paths (`path-like` or list of `path-like`):
                Path(s) of the JSON Lines file(s).
            features ([`Features`], *optional*):
                Dataset features.
            cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
                Directory to cache data.
            keep_in_memory (`bool`, defaults to `False`):
                Whether to copy the data in-memory.
            **kwargs (additional keyword arguments):
                Keyword arguments to be passed to [`JsonConfig`].

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> from datasets import DatasetDict
        >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
        ```
        r   )JsonDatasetReaderrF  )io.jsonrL  rH  )rB  r7   rC  r   rI  rL  s         r/   	from_jsonzDatasetDict.from_jsonS  R    B 	/.....  
$,	R`
 
dj
 

$&&	r1   c                 N    ddl m}  || f||||d|                                S )a5  Create [`DatasetDict`] from Parquet file(s).

        Args:
            path_or_paths (`dict` of path-like):
                Path(s) of the Parquet file(s).
            features ([`Features`], *optional*):
                Dataset features.
            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
                Directory to cache data.
            keep_in_memory (`bool`, defaults to `False`):
                Whether to copy the data in-memory.
            columns (`List[str]`, *optional*):
                If not `None`, only these columns will be read from the file.
                A column name may be a prefix of a nested field, e.g. 'a' will select
                'a.b', 'a.c', and 'a.d.e'.
            **kwargs (additional keyword arguments):
                Keyword arguments to be passed to [`ParquetConfig`].

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> from datasets import DatasetDict
        >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
        ```
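
        A subset of columns can be loaded as well (a sketch; `text` stands for one of your column names):

        ```py
        >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'}, columns=['text'])
        ```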
        r   )ParquetDatasetReader)r7   rC  r   r   )
io.parquetrQ  rH  )rB  r7   rC  r   r   rI  rQ  s          r/   from_parquetzDatasetDict.from_parquetz  sZ    L 	544444##
)
 
 
 
 $&&	r1   c                 L    ddl m}  || f|||d|                                S )a+  Create [`DatasetDict`] from text file(s).

        Args:
            path_or_paths (`dict` of path-like):
                Path(s) of the text file(s).
            features ([`Features`], *optional*):
                Dataset features.
            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
                Directory to cache data.
            keep_in_memory (`bool`, defaults to `False`):
                Whether to copy the data in-memory.
            **kwargs (additional keyword arguments):
                Keyword arguments to be passed to [`TextConfig`].

        Returns:
            [`DatasetDict`]

        Example:

        ```py
        >>> from datasets import DatasetDict
        >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
        ```
        r   )TextDatasetReaderrF  )io.textrU  rH  )rB  r7   rC  r   rI  rU  s         r/   	from_textzDatasetDict.from_text  rO  r1   r   taskidc                     |                                   t          fd|                                 D                       S )Nc                 F    i | ]\  }}||                                S ))rX  rY  )prepare_for_task)r@   rL   r.   rY  rX  s      r/   rT   z0DatasetDict.prepare_for_task.<locals>.<dictcomp>  s3    iiijaQXAw77Tb7IIiiir1   rm   )r-   rX  rY  s    ``r/   r\  zDatasetDict.prepare_for_task  sI    !!!iiiii\`\f\f\h\hiiijjjr1   label2idlabel_columnc                     |                                   t          fd|                                 D                       S )Nc                 F    i | ]\  }}||                                S ))r]  r^  )align_labels_with_mapping)r@   rL   r.   r]  r^  s      r/   rT   z9DatasetDict.align_labels_with_mapping.<locals>.<dictcomp>  sC       Aw 744hUa4bb  r1   rm   )r-   r]  r^  s    ``r/   ra  z%DatasetDict.align_labels_with_mapping  s[    !!!    "&**,,  
 
 	
r1   Tprivatetokenbranchembed_external_filesc                    |d | D             }n$t          |t                    st          d          |                                  |                                  d}d}	t          t          |                                                     j        	                                }
t                      |
_        |                                 D ]7}t          j        t          |          st          dt           d| d          8|                                 D ]}t                               d| d	           | |                             |||||||                    |          |
          \  }}}}}}||z  }|	|z  }	t)          t+          |          |t-          | |                             |
j        |<   d|
_        ||
_        |	|
_        ||	z   |
_        t7          t8          j                  }|                    |d||          }t8          j        |v rtA                      }|!                    d           |
"                    |d           |!                    d           t7          t8          j                  #                    |$                                t8          j        ||d|           d|v rtK                      }d|_&        ||_'        tQ          tS          |d          |          }tU          j+        tY          |                    }t[          |d          5 }|.                                }ddd           n# 1 swxY w Y   n-tU                      }d|/                    d          d          d}ta          d|
i          1                    |           t7          t8          j                  #                    |2                    |          3                                d||d|           dS )a  Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
        The [`DatasetDict`] is pushed using HTTP requests and does not need to have neither git or git-lfs installed.

        Each dataset split will be pushed independently. The pushed dataset will keep the original split names.

        The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
        data, the Parquet files will store the bytes of your images or audio files.
        You can disable this by setting `embed_external_files` to False.

        Args:
            repo_id (`str`):
                The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
                `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
                of the logged-in user.
            private (`bool`, *optional*):
                Whether the dataset repository should be set to private or not. Only affects repository creation:
                a repository that already exists will not be affected by that parameter.
            token (`str`, *optional*):
                An optional authentication token for the Hugging Face Hub. If no token is passed, will default
                to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
                if no token is passed and the user is not logged-in.
            branch (`str`, *optional*):
                The git branch on which to push the dataset.
            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
                (like `"500MB"` or `"1GB"`).
            num_shards (`Dict[str, int]`, *optional*):
                Number of shards to write. By default the number of shards depends on `max_shard_size`.
                Use a dictionary to define a different num_shards for each split.

                <Added version="2.8.0"/>
            embed_external_files (`bool`, defaults to `True`):
                Whether to embed file bytes in the shards.
                In particular, this will do the following before the push for the fields of type:

                - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files.

        Example:

        ```python
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
        ```
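
        The pushed dataset can then be reloaded with `load_dataset` (the repository name below is a placeholder):

        ```python
        >>> from datasets import load_dataset
        >>> ds = load_dataset("<organization>/<dataset_id>")
        ```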
        Nc                     i | ]}|d S r   r?   r   s     r/   rT   z+DatasetDict.push_to_hub.<locals>.<dictcomp>  r  r1   r  r   zSplit name should match 'z' but got 'z'.zPushing split z to the Hub.)rA   rb  rc  rd  r  r  re  )	num_bytesnum_examples)endpointr.   )	repo_typerevisionrc  s   {"default": T)pretty_print   })path_or_fileobjpath_in_reporepo_idrc  rk  rl  z	README.mdzDownloading metadata)download_configr  r  z# Dataset Card for "/r3   z"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)default)4r)   r  r8   r0   r;   nextiterr(   infor   r   r  keysr{   matchr   loggerwarning_push_parquet_shards_to_hubr,  r   rC   rD   download_checksumsdownload_sizedataset_sizesize_in_bytesr   r   HF_ENDPOINTlist_repo_filesDATASETDICT_INFOS_FILENAMEr   write
_dump_infoupload_filegetvaluer   download_descuse_auth_tokenr!   r"   r   from_readmer   r'  rH  rA   r   to_metadata
_to_readmeencode)r-   rq  rb  rc  rd  r  r  re  total_uploaded_sizetotal_dataset_nbytesinfo_to_dumprA   uploaded_sizedataset_nbytes_api
repo_filesbufferrr  dataset_readme_pathdataset_metadatareadme_filereadme_contents                          r/   push_to_hubzDatasetDict.push_to_hub  sw   r 004000JJJ-- 	y   	!!!##%%% $(dkkmm)<)<$=$=$B$G$G$I$I'kkYY[[ 	^ 	^E8Iu-- ^ !\Y!\!\SX!\!\!\]]]^ YY[[ 	x 	xENN?E???@@@BFu+BiBi-%>>%00%9 Cj 	C 	C?GUM>1a  =0 N2 )23u::fijnotjufvfv)w)w)wL&&*.'%8"$8!%8;O%O"V/000((IPV^c(dd
 ,
::YYFLL)))##F#>>>LL6-...:: & 1 1#># ;    *$$,..O,BO)-2O*"-7K00 /# # #  /:4@S;T;TUU)G<<< 4!,!1!1!3!34 4 4 4 4 4 4 4 4 4 4 4 4 4 4  /00 UGMM#4F4Fr4J  U  U  UN)\233??@PQQQv)***66,77GGNNPP$ 	7 	
 	
 	
 	
 	
s   )M

MM)rh   )r<   r%   )F)NNF)NF)NFFNFr   FNFNNr   NFNNN)FNFr   FNNr   NNN)Fr   r   FNNr   )NNNFNNr   )r   NNNN)r   NN)NNFN)r   )FNNNNT)D__name__
__module____qualname____doc__r0   r;   r   rF   propertyr   rC   r   rR   rY   intr]   r`   r   rd   r
   rg   rl   rq   rt   r   r   r   r   r   r   r   r   r   boolr   
contextlibcontextmanagerr   r   r   r   r   r   r   r   r  r   r   r	   r   nprandom	Generatorr
  r#   r+  staticmethodr;  rJ  rN  rS  rW  r    r\  r   ra  r  __classcell__)rO   s   @r/   r%   r%   '   s(       jjw w w
         @d3:& @ @ @ X@ GT#t)_ G G G XG" GT#s(^ G G G XG D$sCx. D D D XD Hd3S	>2 H H H XH" AtCsO, A A A XAe e e e e@JS JT#t)_ J J J J0QT#s(^ Q Q Q Q&. . .
!`X !`- !` !` !` !`Ft# t= t t t t:%r5d3i+@ %r] %r %r %r %rN+
# +
 +
P] +
 +
 +
 +
Z$vT#s(^ $v $v $v $v $vL"r5d3i+@ "r] "r "r "r "rH
 
# 
d 
} 
 
 
 
@  #"&#(	" "sm" $" !	" " " "L #"&#(	,s ,ssm,s $,s !	,s ,s ,s ,s\! ! !F #'#(	v vH%v $v !	v v v v4 #"&#(	2 2sm2 $2 !	2 
2 2 2 2n #'#(	4 4H%4 $4 !	4
 
4 4 4 4p (,"9=$( %:>$/3?C+/'+!&$("&"%~
 ~
8$~
 ~
 	~

  c49n 56~
 ~
 SM~
 ~
 !sDI~!67~
 ~
 'tn~
 #4Xc](:#;<~
 $C=~
 8$~
 ~
  D>!~
" 3-#~
$ sm%~
& 
'~
 ~
 ~
 ~
F 9=$($/3?C+/$("&"d
 d
  c49n 56	d

 d
 SMd
 d
 'tnd
 #4Xc](:#;<d
 $C=d
 D>d
 3-d
 smd
 
d
 d
 d
 d
R 05&$/3GK+/L
 L
C#./L
 tXd^+,L

 L
 L
 'tnL
 #+4Xc]0B+C"DL
 $C=L
 
L
 L
 L
 L
` AE"?C$/3GK+/Q
 Q
c4Xc](:#;;<=Q
 smQ
 T#ry':":;<	Q

 Q
 'tnQ
 #+4Xc]0B+C"DQ
 $C=Q
 
Q
 Q
 Q
 Q
l 48/3"&*.^ ^#^ !sCx1	^
 T#s(^,^ 3-^ "$^ ^ ^ ^@  )-*.	W W#W !W "$	W
 
W W W \Wr  (,$	$ $CM*$8$$ $ 	$ 
$ $ $ \$L  (,$	$ $CM*$8$$ $ 	$ 
$ $ $ \$L  (,$'+. .CM*.8$. . 	.
 $s)$. 
. . . \.`  (,$	$ $CM*$8$$ $ 	$ 
$ $ $ \$L g.//k kU3+<%= k3 k} k k k 0/k g788
$ 
c 
m 
 
 
 98
 #(#!%48/3%)H
 H
 $H
 }	H

 H
 !sCx1H
 T#s(^,H
 #H
 H
 H
 H
 H
 H
 H
 H
r1   r%   c                      e Zd Z	 d dee         dd fdZ	 	 	 	 	 	 	 d!dee         ded	eeee	e         f                  d
ede
dedeeee	e         f                  dd fdZ	 	 	 	 	 d"dee         d	eeee	e         f                  d
edee
         dd f
dZ	 d#deej        j                 de
dd fdZdededd fdZdeeef         dd fdZdeee	e         f         dd fdZdeee	e         f         dd fdZdededd fdZdedd fdZdS )$IterableDatasetDictNr+   r<   c                 ^    t          fd|                                 D                       S )a  
        Return a dataset with the specified format.
        This method only supports the "torch" format for now.
        The format is set to all the datasets of the dataset dictionary.

        Args:
            type (`str`, *optional*, defaults to `None`):
                If set to "torch", the returned dataset
                will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> from transformers import AutoTokenizer
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> def encode(example):
        ...     return tokenizer(examples["text"], truncation=True, padding="max_length")
        >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
        >>> ds = ds.with_format("torch")
        ```
        c                 D    i | ]\  }}||                                S ))r+   )r   )r@   rL   r.   r+   s      r/   rT   z3IterableDatasetDict.with_format.<locals>.<dictcomp>  s0    #e#e#e*!WAw':':':'E'E#e#e#er1   r  r5   )r-   r+   s    `r/   r   zIterableDatasetDict.with_formatm  s5    6 ##e#e#e#eX\XbXbXdXd#e#e#efffr1   Fr   r   r   r   r   r   r   r   c           	      v    t          fd|                                 D                       S )an  
        Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
        If your function returns a column that already exists, then it overwrites it.
        The function is applied on-the-fly on the examples when iterating over the dataset.
        The transformation is applied to all the datasets of the dataset dictionary.

        You can specify whether the function should be batched or not with the `batched` parameter:

        - If batched is `False`, then the function takes 1 example in and should return 1 example.
          An example is a dictionary, e.g. `{"text": "Hello there !"}`.
        - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
          A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
        - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
          Note that the last batch may have less than `n` examples.
          A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.

        Args:
            function (`Callable`, *optional*, defaults to `None`):
                Function applied on-the-fly on the examples when you iterate on the dataset.
                It must have one of the following signatures:

                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
                - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`

                For advanced usage, the function can also return a `pyarrow.Table`.
                Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
                If no function is provided, default to identity function: `lambda x: x`.
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
            input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
                The columns to be passed into `function`
                as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
            drop_last_batch (`bool`, defaults to `False`):
                Whether a last batch smaller than the `batch_size` should be
                dropped instead of being processed by the function.
            remove_columns (`[List[str]]`, *optional*, defaults to `None`):
                Remove a selection of columns while doing the mapping.
                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
                columns with names in `remove_columns`, these columns will be kept.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> def add_prefix(example):
        ...     example["text"] = "Review: " + example["text"]
        ...     return example
        >>> ds = ds.map(add_prefix)
        >>> next(iter(ds["train"]))
        {'label': 1,
         'text': 'Review: the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        c                 P    i | ]"\  }}||                     	           #S ))r   r   r   r   r   r   r   r   )
r@   rL   r.   r   r   r   r   r   r   r   s
      r/   rT   z+IterableDatasetDict.map.<locals>.<dictcomp>  sY        Aw 7;;%!-"/#)$3#1     r1   r  )r-   r   r   r   r   r   r   r   s    ```````r/   r   zIterableDatasetDict.map  sl    L #          #'**,,  
 
 	
r1   c                 n    t          fd|                                 D                       S )a  Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
        The filtering is done on-the-fly when iterating over the dataset.
        The filtering is applied to all the datasets of the dataset dictionary.

        Args:
            function (`Callable`):
                Callable with one of the following signatures:

                - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
                - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
                - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
                - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`

                If no function is provided, defaults to an always True function: `lambda x: True`.
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
            input_columns (`str` or `List[str]`, *optional*):
                The columns to be passed into `function` as
                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.filter(lambda x: x["label"] == 0)
        >>> list(ds["train"].take(3))
        [{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
         {'label': 0,
         'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
         {'label': 0,
         'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
        ```
        c           
      L    i | ] \  }}||                                !S ))r   r   r   r   r   r   )r@   rL   r.   r   r   r   r   r   s      r/   rT   z.IterableDatasetDict.filter.<locals>.<dictcomp>  sS     	 	 	 Aw 7>>%!-"/#) "  	 	 	r1   r  )r-   r   r   r   r   r   s    `````r/   r   zIterableDatasetDict.filter  s`    \ #	 	 	 	 	 	 	 	 #'**,,	 	 	
 
 	
r1   r  buffer_sizec                 f    t          fd|                                 D                       S )a  
        Randomly shuffles the elements of this dataset.
        The shuffling is applied to all the datasets of the dataset dictionary.

        This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
        replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
        equal to the full size of the dataset is required.

        For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
        initially select a random element from only the first 1000 elements in the buffer. Once an element is
        selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
        maintaining the 1000 element buffer.

        If the dataset is made of several shards, it also does `shuffle` the order of the shards.
        However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
        then the order of the shards is kept unchanged.

        Args:
            seed (`int`, *optional*, defaults to `None`):
                Random seed that will be used to shuffle the dataset.
                It is used to sample from the shuffle buffer and also to shuffle the data shards.
            generator (`numpy.random.Generator`, *optional*):
                Numpy random Generator to use to compute the permutation of the dataset rows.
                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
            buffer_size (`int`, defaults to `1000`):
                Size of the buffer.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> list(ds["train"].take(3))
        [{'label': 1,
         'text': 'the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
         {'label': 1,
         'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson's expanded vision of j . r . r . tolkien's middle-earth .'},
         {'label': 1, 'text': 'effective but too-tepid biopic'}]
        >>> ds = ds.shuffle(seed=42)
        >>> list(ds["train"].take(3))
        [{'label': 1,
         'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
         {'label': 1,
         'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
         {'label': 1,
         'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
        ```
        c                 H    i | ]\  }}||                                S ))r   r  r  r	  )r@   rL   r.   r  r  r   s      r/   rT   z/IterableDatasetDict.shuffle.<locals>.<dictcomp>N  sA       Aw 7??	{?[[  r1   r  )r-   r   r  r  s    ```r/   r
  zIterableDatasetDict.shuffle  sR    f #     "&**,,  
 
 	
r1   r   r   c                 b    t          fd|                                 D                       S )a  
        Rename a column in the dataset, and move the features associated to the original column under the new column
        name.
        The renaming is applied to all the datasets of the dataset dictionary.

        Args:
            original_column_name (`str`):
                Name of the column to rename.
            new_column_name (`str`):
                New name for the column.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset with a renamed column.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.rename_column("text", "movie_review")
        >>> next(iter(ds["train"]))
        {'label': 1,
         'movie_review': 'the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        c                 F    i | ]\  }}||                                S r   r   r   s      r/   rT   z5IterableDatasetDict.rename_column.<locals>.<dictcomp>o  r   r1   r  r   s    ``r/   r   z!IterableDatasetDict.rename_columnT  sK    4 #    "&**,,  
 
 	
r1   r   c                 ^    t          fd|                                 D                       S )a  
        Rename several columns in the dataset, and move the features associated to the original columns under
        the new column names.
        The renaming is applied to all the datasets of the dataset dictionary.

        Args:
            column_mapping (`Dict[str, str]`):
                A mapping of columns to rename to their new names.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset with renamed columns

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
        >>> next(iter(ds["train"]))
        {'movie_review': 'the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
         'rating': 1}
        ```
        c                 D    i | ]\  }}||                                S r   r   r   s      r/   rT   z6IterableDatasetDict.rename_columns.<locals>.<dictcomp>  s0    eee*!WQ&&n&EEeeer1   r  r   s    `r/   r   z"IterableDatasetDict.rename_columnsu  s;    0 #eeeeX\XbXbXdXdeee
 
 	
r1   rd   c                 ^    t          fd|                                 D                       S )a  
        Remove one or several column(s) in the dataset and the features associated to them.
        The removal is done on-the-fly on the examples when iterating over the dataset.
        The removal is applied to all the datasets of the dataset dictionary.


        Args:
            column_names (`Union[str, List[str]]`):
                Name of the column(s) to remove.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.remove_columns("label")
        >>> next(iter(ds["train"]))
        {'text': 'the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        c                 B    i | ]\  }}||                               S r?   r   r   s      r/   rT   z6IterableDatasetDict.remove_columns.<locals>.<dictcomp>  0    #k#k#kPZPQSZAw'='=l'K'K#k#k#kr1   r  r   s    `r/   r   z"IterableDatasetDict.remove_columns  5    0 ##k#k#k#k^b^h^h^j^j#k#k#klllr1   c                 ^    t          fd|                                 D                       S )a  Select one or several column(s) in the dataset and the features
        associated to them. The selection is done on-the-fly on the examples
        when iterating over the dataset. The selection is applied to all the
        datasets of the dataset dictionary.


        Args:
            column_names (`Union[str, List[str]]`):
                Name of the column(s) to keep.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.select_columns("text")
        >>> next(iter(ds["train"]))
        {'text': 'the rock is destined to be the 21st century's new " conan " and that he's going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        c                 B    i | ]\  }}||                               S r?   r   r   s      r/   rT   z6IterableDatasetDict.select_columns.<locals>.<dictcomp>  r  r1   r  r   s    `r/   r   z"IterableDatasetDict.select_columns  r  r1   rn   r   c                 b    t          fd|                                 D                       S )af  Cast column to feature for decoding.
        The type casting is applied to all the datasets of the dataset dictionary.

        Args:
            column (`str`):
                Column name.
            feature ([`Feature`]):
                Target feature.

        Returns:
            [`IterableDatasetDict`]

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
         'text': Value(dtype='string', id=None)}
        ```
        c                 F    i | ]\  }}||                                S r   r   r   s      r/   rT   z3IterableDatasetDict.cast_column.<locals>.<dictcomp>  s2    ccc
7Q##67#CCcccr1   r  r   s    ``r/   r   zIterableDatasetDict.cast_column  s?    6 #cccccVZV`V`VbVbccc
 
 	
r1   r7   c                 ^    t          fd|                                 D                       S )aD  
        Cast the dataset to a new set of features.
        The type casting is applied to all the datasets of the dataset dictionary.

        Args:
            features (`Features`):
                New features to cast the dataset to.
                The name of the fields in the features must match the current column names.
                The type of the data must also be convertible from one type to the other.
                For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset with casted features.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        >>> new_features = ds["train"].features.copy()
        >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
        >>> new_features['text'] = Value('large_string')
        >>> ds = ds.cast(new_features)
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
         'text': Value(dtype='large_string', id=None)}
        ```
        c                 D    i | ]\  }}||                                S r   r   r   s      r/   rT   z,IterableDatasetDict.cast.<locals>.<dictcomp>  s-    #f#f#f:1gAw||X|'F'F#f#f#fr1   r  r   s    `r/   r   zIterableDatasetDict.cast  s6    F ##f#f#f#fY]YcYcYeYe#f#f#fgggr1   r   )NFNFr   FN)NFNFr   )NNr   )r  r  r  r   rC   r   r   r  r   r   r  r   r   r  r  r  r
  r   r   r   r   r   r   r   r   r   r?   r1   r/   r  r  l  s        #g gsmg 
g g g g> (,"9= %:>S
 S
8$S
 S
  c49n 56	S

 S
 S
 S
 !sDI~!67S
 
S
 S
 S
 S
n (,9=$(9
 9
8$9
  c49n 56	9

 9
 SM9
 
9
 9
 9
 9
x ^b8
 8
$,RY-@$A8
WZ8
	8
 8
 8
 8
t
# 
 
Pe 
 
 
 
B
T#s(^ 
@U 
 
 
 
8m5d3i+@ mEZ m m m m4m5d3i+@ mEZ m m m m4
# 
 
@U 
 
 
 
>#h#h 
#h #h #h #h #h #hr1   r  )Er  r   r)  r!  r#  r{   r  ior   pathlibr   typingr   r   r   r   r	   r
   r   r  numpyr  huggingface_hubr   datasets.utils.metadatar    r   arrow_datasetr   downloadr   r7   r   features.featuresr   filesystemsr   r   rw  r   r   namingr   r  r   r   r   r   tabler   tasksr   utilsr   utils.doc_utilsr    utils.file_utilsr!   	utils.hubr"   utils.typingr#   
get_loggerr  rz  r  r%   r  r?   r1   r/   <module>r     s         				     				              I I I I I I I I I I I I I I I I I I      ! ! ! ! ! ! 3 3 3 3 3 3       " " " " " " $ $ $ $ $ $       * * * * * * D D D D D D D D / / / / / / / /       ; ; ; ; ; ; ; ; ; ; ; ;                   - - - - - - ) ) ) ) ) ) ! ! ! ! ! ! " " " " " " 
	H	%	%B
 B
 B
 B
 B
$ B
 B
 B
J2[h [h [h [h [h$ [h [h [h [h [hr1   