I am working on Federated Learning experiments using Intel OpenFL. I want to distribute my dataset (MNIST) across nodes under different non-IID scenarios. I am following the official documentation: https://openfl.readthedocs.io/en/latest/source/utilities/splitters_data.html
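For reference, these are the NumPy splitters that page describes, if I am reading it correctly (RandomNumPyDataSplitter is the one I try to use below):

# Splitters listed on the documentation page above (import path per the docs).
from openfl.utilities.data_splitters import (
    DirichletNumPyDataSplitter,
    EqualNumPyDataSplitter,
    LogNormalNumPyDataSplitter,
    RandomNumPyDataSplitter,
)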
This is my original working code:
"""Mnist Shard Descriptor."""
import logging
import os
from typing import List
import numpy as np
import requests
from openfl.interface.interactive_api.shard_descriptor import ShardDataset
from openfl.interface.interactive_api.shard_descriptor import ShardDescriptor
logger = logging.getLogger(__name__)
class MnistShardDataset(ShardDataset):
"""Mnist Shard dataset class."""
def __init__(self, x, y, data_type, rank=1, worldsize=1):
"""Initialize MNISTDataset."""
self.data_type = data_type
self.rank = rank
self.worldsize = worldsize
self.x = x[self.rank - 1::self.worldsize]
self.y = y[self.rank - 1::self.worldsize]
def __getitem__(self, index: int):
"""Return an item by the index."""
return self.x[index], self.y[index]
def __len__(self):
"""Return the len of the dataset."""
return len(self.x)
class MnistShardDescriptor(ShardDescriptor):
"""Mnist Shard descriptor class."""
def __init__(
self,
rank_worldsize: str = '1, 1',
**kwargs
):
"""Initialize MnistShardDescriptor."""
self.rank, self.worldsize = tuple(int(num) for num in rank_worldsize.split(','))
(x_train, y_train), (x_test, y_test) = self.download_data()
self.data_by_type = {
'train': (x_train, y_train),
'val': (x_test, y_test)
}
def get_shard_dataset_types(self) -> List[str]:
"""Get available shard dataset types."""
return list(self.data_by_type)
def get_dataset(self, dataset_type='train'):
"""Return a shard dataset by type."""
if dataset_type not in self.data_by_type:
raise Exception(f'Wrong dataset type: {dataset_type}')
return MnistShardDataset(
*self.data_by_type[dataset_type],
data_type=dataset_type,
rank=self.rank,
worldsize=self.worldsize
)
@property
def sample_shape(self):
"""Return the sample shape info."""
return ['28', '28', '1']
@property
def target_shape(self):
"""Return the target shape info."""
return ['28', '28', '1']
@property
def dataset_description(self) -> str:
"""Return the dataset description."""
return (f'Mnist dataset, shard number {self.rank}'
f' out of {self.worldsize}')
def download_data(self):
"""Download prepared dataset."""
local_file_path = 'mnist.npz'
mnist_url = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz'
response = requests.get(mnist_url)
with open(local_file_path, 'wb') as f:
f.write(response.content)
with np.load(local_file_path) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
#x_train = np.reshape(x_train, (-1, 784))
#x_test = np.reshape(x_test, (-1, 784))
os.remove(local_file_path) # remove mnist.npz
print('Mnist data was loaded!')
return (x_train, y_train), (x_test, y_test)
Basically, I changed the MnistShardDescriptor class on both nodes of my federation in this way:
...
class MnistShardDescriptor(ShardDescriptor):
    """Mnist Shard descriptor class."""

    def __init__(
            self,
            rank_worldsize: str = '1, 1',
            **kwargs
    ):
        """Initialize MnistShardDescriptor."""
        self.rank, self.worldsize = tuple(int(num) for num in rank_worldsize.split(','))
        (x_train, y_train), (x_test, y_test) = self.download_data()
        train_splitter = RandomNumPyDataSplitter()
        test_splitter = RandomNumPyDataSplitter()
        train_idx = train_splitter.split(y_train, self.worldsize)[self.rank]
        test_idx = test_splitter.split(y_test, self.worldsize)[self.rank]
        x_train_shard = x_train[train_idx]
        x_test_shard = x_test[test_idx]
        self.data_by_type = {
            'train': (x_train, y_train),
            'val': (x_test, y_test)
        }
...
I get this error at the train_idx line:

IndexError: list index out of range

but only on one of the 2 nodes. I do not know why, because the code is exactly the same on both nodes of my federation.
EDIT: I changed the position of the code I wrote above; in particular, I moved it into the MnistShardDataset class rather than MnistShardDescriptor:
class MnistShardDataset(ShardDataset):
    """Mnist Shard dataset class."""

    def __init__(self, x, y, data_type, rank=1, worldsize=1):
        """Initialize MNISTDataset."""
        self.data_type = data_type
        self.rank = rank
        self.worldsize = worldsize
        self.x = x[self.rank - 1::self.worldsize]
        self.y = y[self.rank - 1::self.worldsize]
        train_splitter = RandomNumPyDataSplitter()
        #test_splitter = RandomNumPyDataSplitter()
        train_idx = train_splitter.split(self.y, self.worldsize)[self.rank]
        #test_idx = test_splitter.split(self.y, self.worldsize)[self.rank]
        x_train_shard = self.x[train_idx]
        #x_test_shard = self.x[test_idx]
        self.x = x_train_shard
With this I am able to create the federation, and on the same node as the director the clients start training. The split is truly random: I ran the experiment twice, and each time the envoy had a different number of samples. However, on the other node (I am using 2 nodes, one for each envoy; OpenFL calls the worker on a client an envoy) I get the same IndexError: list index out of range…
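For reference, this is how I check how many samples an envoy ends up with (a quick sketch assuming the classes above are in scope; the rank_worldsize value is just an example):

# Quick check of the shard size on one envoy (hypothetical standalone run).
descriptor = MnistShardDescriptor(rank_worldsize='1, 2')
train_set = descriptor.get_dataset('train')
print(len(train_set))  # changes between runs because the splitter is random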
EDIT2: here is an example of data splitting using OpenFL: https://github.com/intel/openfl/blob/develop/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py
However my dataset is different, and I have not succeeded in adapting this solution. Can you suggest any other example of sharding a dataset like MNIST, or a tutorial to follow?
Entire error:
File "/home/lmancuso/envoymnist/mnist_shard_descriptor_with_data_splitter.py", line 61, in __init__
train_idx = train_splitter.split(y_train, self.worldsize)[self.rank]
IndexError: list index out of range
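The same IndexError shows up outside the federation in a minimal standalone snippet (dummy labels, made-up sizes), which suggests that split() returns exactly worldsize chunks of indices, so only positions 0 to worldsize - 1 exist:

import numpy as np
from openfl.utilities.data_splitters import RandomNumPyDataSplitter

y = np.random.randint(0, 10, size=100)  # dummy labels, not my dataset
splitter = RandomNumPyDataSplitter()
chunks = splitter.split(y, 2)  # worldsize = 2
print(len(chunks))  # 2: only chunks[0] and chunks[1] exist
chunks[2]  # IndexError: list index out of range, like on the failing envoy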
EDIT3: interesting point: if I increase the worldsize of my federation from 2 to 3 in the rank_worldsize field of envoy_config.yaml, training starts (and the split really is random, because each node gets a different number of samples). However, it only appears to work: I still have 2 nodes, so I declared a federation of 3 without a third node. One node gets 8064 samples and the other 9856, yet MNIST has 60000 training samples, so all the remaining samples are lost: they are supposed to go to the last node, which does not exist.
The only solution I have found so far is to reduce the rank of each envoy by one:
train_idx = train_splitter.split(self.y, self.worldsize)[self.rank-1]
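Applying that shift to both splits (and actually storing the shards, which my earlier snippet computed but never used), the __init__ of MnistShardDescriptor becomes something like this sketch (same RandomNumPyDataSplitter import as in the Kvasir example):

from openfl.utilities.data_splitters import RandomNumPyDataSplitter

def __init__(self, rank_worldsize: str = '1, 1', **kwargs):
    """Initialize MnistShardDescriptor."""
    self.rank, self.worldsize = tuple(int(num) for num in rank_worldsize.split(','))
    (x_train, y_train), (x_test, y_test) = self.download_data()
    train_splitter = RandomNumPyDataSplitter()
    test_splitter = RandomNumPyDataSplitter()
    # split() returns worldsize chunks indexed 0..worldsize - 1,
    # while rank is 1-based, hence the - 1.
    train_idx = train_splitter.split(y_train, self.worldsize)[self.rank - 1]
    test_idx = test_splitter.split(y_test, self.worldsize)[self.rank - 1]
    self.data_by_type = {
        'train': (x_train[train_idx], y_train[train_idx]),
        'val': (x_test[test_idx], y_test[test_idx])
    }

Note that MnistShardDataset still stripes x and y by rank and worldsize in its own __init__, so with pre-split shards it would subsample a second time; passing rank=1, worldsize=1 there (or removing the striding) avoids double sharding.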