Tags: python, pytorch, huggingface, pytorch-dataloader, huggingface-datasets

How does one create a PyTorch DataLoader from an interleaved Hugging Face dataset?


When I interleave datasets, take a tokenized batch, and feed the batch to the PyTorch DataLoader, I get errors:

# -*- coding: utf-8 -*-
"""issues with dataloader and custom data sets

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1sbs95as_66mtK9VK_vbaE9gLE-Tjof1-
"""

!pip install datasets
!pip install torch
!pip install transformers

token = None
batch_size = 10
from datasets import load_dataset
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
if tokenizer.pad_token_id is None:
  tokenizer.pad_token = tokenizer.eos_token
probe_network = GPT2LMHeadModel.from_pretrained("gpt2")
device = torch.device(f"cuda:{0}" if torch.cuda.is_available() else "cpu")
probe_network = probe_network.to(device)

# -- Get batch from dataset
from datasets import load_dataset
# path, name = 'brando/debug1_af', 'debug1_af'
path, name = 'brando/debug0_af', 'debug0_af'
remove_columns = []
dataset = load_dataset(path, name, streaming=True, split="train", token=token).with_format("torch")
print(f'{dataset=}')
batch = dataset.take(batch_size)
# print(f'{next(iter(batch))=}')

# - Prepare functions to tokenize batch
def preprocess(examples):  # tokenize the raw text column, using the column name from the dataset's schema
    return tokenizer(examples["link"], padding="max_length", max_length=128, truncation=True, return_tensors="pt")
def map(batch):  # apply preprocess to all examples in the batch (the batch is itself a dataset)
    return batch.map(preprocess, batched=True, remove_columns=remove_columns)
# tokenized_batch = batch.map(preprocess, batched=True, remove_columns=remove_columns)  # same as map(batch)
tokenized_batch = map(batch)
# print(f'{next(iter(tokenized_batch))=}')

from torch.utils.data import Dataset, DataLoader, SequentialSampler
dataset = tokenized_batch
print(f'{type(dataset)=}')
print(f'{dataset.__class__=}')
print(f'{isinstance(dataset, Dataset)=}')
# for i, d in enumerate(dataset):
#     assert isinstance(d, dict)
#     # dd = dataset[i]
#     # assert isinstance(dd, dict)
loader_opts = {}
classifier_opts = {}
# data_loader = DataLoader(dataset, shuffle=False, batch_size=loader_opts.get('batch_size', 1),
#                         num_workers=loader_opts.get('num_workers', 0), drop_last=False, sampler=SequentialSampler(range(512))  )
data_loader = DataLoader(dataset, shuffle=False, batch_size=loader_opts.get('batch_size', 1),
                    num_workers=loader_opts.get('num_workers', 0), drop_last=False, sampler=None)
print(f'{iter(data_loader)=}')
print(f'{next(iter(data_loader))=}')
print('Done\a')

which fails with this error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/collate.py in collate(batch, collate_fn_map)
    126         try:
--> 127             return elem_type({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
    128         except TypeError:

[... 9 frames omitted ...]
TypeError: default_collate: batch must contain tensors, numpy arrays, numbers, dicts or lists; found <class 'NoneType'>

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
/usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/collate.py in collate(batch, collate_fn_map)
    148                 return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]
    149 
--> 150     raise TypeError(default_collate_err_msg_format.format(elem_type))
    151 
    152 

TypeError: default_collate: batch must contain tensors, numpy arrays, numbers, dicts or lists; found <class 'NoneType'>

Why? And why don't the single datasets c4 and wikitext give this error, only the interleaved datasets?

Ideally I don't want to write my own collate_fn.


Solution

  • When datasets are interleaved, the resulting dataset's columns are the union of the sources' columns, so an example drawn from one dataset has None in every column that exists only in the other, and the default collate function doesn't know how to merge None values (a minimal repro is sketched after the snippet below). The way I fixed it is by keeping only the columns I want:

        # -- Get data set
        # remove_columns = ['text', 'timestamp', 'url']
        keep_col = ['text']
        # keep only the columns in dataset.column_names that intersect with the keep_col list
        print('-- interleaving datasets')
        datasets = [load_dataset(path, name, streaming=True, split="train").with_format("torch") for path, name in zip(path, name)]
        [print(f'{dataset.description=}') for dataset in datasets]
        dataset = interleave_datasets(datasets, probabilities)
        remove_columns = [col for col in dataset.column_names if col not in keep_col]
        print(f'{dataset=}')
        batch = dataset.take(batch_size)
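
    To see why the default collate chokes without this filtering, here is a minimal illustrative repro (the hard-coded rows are an assumption mimicking interleaved c4/wikitext examples, where only c4 has a url column):

        from torch.utils.data.dataloader import default_collate

        rows = [
            {"text": "from wikitext", "url": None},         # wikitext example: no 'url' column
            {"text": "from c4", "url": "https://example"},  # c4 example has one
        ]
        default_collate(rows)
        # TypeError: default_collate: batch must contain tensors, numpy arrays,
        # numbers, dicts or lists; found <class 'NoneType'>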
    

    But doing the tokenization inside the collate function also works, if you know which text field you want (assuming "text", since it is so common):

        def collate_tokenize(data):
            print(f'{data[0]=}')
            text_batch = [element["text"] for element in data]
            tokenized = tokenizer(text_batch, padding='longest', truncation=True, return_tensors='pt')
            return tokenized
        data_loader = DataLoader(tokenized_batch, shuffle=False, batch_size=8, num_workers=0, drop_last=False, collate_fn=collate_tokenize)
        # data_loader = DataLoader(tokenized_batch, shuffle=False, batch_size=8, num_workers=0, drop_last=False)
        # num_batches = len(list(data_loader))
        batch = next(iter(data_loader))
        print(f'{batch=}')
        print('Done!\a')
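
    With this collate_fn the `.map`/tokenize step becomes optional, since tokenization now happens at collate time; a sketch, reusing the interleaved `dataset` and `collate_tokenize` from above:

        data_loader = DataLoader(dataset, shuffle=False, batch_size=8,
                                 num_workers=0, drop_last=False, collate_fn=collate_tokenize)
        batch = next(iter(data_loader))  # dict with 'input_ids' and 'attention_mask' tensors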
    

    Full code:

    def test_interleaved_data_set_2_data_loader():
        """ https://colab.research.google.com/drive/1QWDhA6Q64qijXYnwIGn63Aq9Eg5qt8tQ#scrollTo=Wjyy6QYimvIm """
        remove_columns = []
        # -- Get probe network
        from datasets import load_dataset
        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel
    
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        if tokenizer.pad_token_id is None:
            tokenizer.pad_token = tokenizer.eos_token
        probe_network = GPT2LMHeadModel.from_pretrained("gpt2")
        device = torch.device(f"cuda:{0}" if torch.cuda.is_available() else "cpu")
        probe_network = probe_network.to(device)
    
        from datasets import interleave_datasets
    
        path, name = ['c4', 'wikitext'], ['en', 'wikitext-103-v1']
        probabilities = [1.0/len(path)] * len(path)
        batch_size = 512
    
        # -- Get data set
        # remove_columns = ['text', 'timestamp', 'url']
        keep_col = ['text']
        # keep only the columns in dataset.column_names that intersect with the keep_col list
        print('-- interleaving datasets')
        datasets = [load_dataset(path, name, streaming=True, split="train").with_format("torch") for path, name in zip(path, name)]
        [print(f'{dataset.description=}') for dataset in datasets]
        dataset = interleave_datasets(datasets, probabilities)
        remove_columns = [col for col in dataset.column_names if col not in keep_col]
        print(f'{dataset=}')
        batch = dataset.take(batch_size)
    
        # - Prepare functions to tokenize batch
        def preprocess(examples):
            return tokenizer(examples["text"], padding="max_length", max_length=128, truncation=True, return_tensors="pt")
        def map(batch):
            return batch.map(preprocess, batched=True, remove_columns=remove_columns)
        # tokenized_batch = batch.map(preprocess, batched=True, remove_columns=remove_columns)
        tokenized_batch = map(batch)
        print(f'{next(iter(tokenized_batch))=}')
    
        # -- Get data loader
        from torch.utils.data import DataLoader, Dataset
    
        # def collate_tokenize(data):
        #     print(f'{data[0]=}')
        #     text_batch = [element["text"] for element in data]
        #     tokenized = tokenizer(text_batch, padding='longest', truncation=True, return_tensors='pt')
        #     return tokenized
        # data_loader = DataLoader(tokenized_batch, shuffle=False, batch_size=8, num_workers=0, drop_last=False, collate_fn=collate_tokenize)
        data_loader = DataLoader(tokenized_batch, shuffle=False, batch_size=8, num_workers=0, drop_last=False)
        # num_batches = len(list(data_loader))
        batch = next(iter(data_loader))
        print(f'{batch=}')
        print('Done!\a')
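
    An alternative, as a sketch (it assumes your datasets version provides IterableDataset.remove_columns for streaming datasets and that column_names is populated): drop the mismatched columns before interleaving, so every example already has identical keys and the default collate never sees a None:

        from datasets import load_dataset, interleave_datasets

        paths, names = ['c4', 'wikitext'], ['en', 'wikitext-103-v1']
        datasets = [load_dataset(p, n, streaming=True, split="train").with_format("torch")
                    for p, n in zip(paths, names)]
        # keep only the shared 'text' column in each source dataset
        datasets = [d.remove_columns([c for c in d.column_names if c != 'text'])
                    for d in datasets]
        dataset = interleave_datasets(datasets, probabilities=[0.5, 0.5])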