I want to apply augmentation to voice samples on the fly. I tested my custom voice augmentation data generator and it seems to work well, but when I fit the CNN model I get the error below. (Due to the limit on how much code I can upload, I will add the rest of the code later.)
UnknownError Traceback (most recent call last)
Cell In [31], line 1
----> 1 history = model.fit(train_aug_gen,
2 #steps_per_epoch=5,
3 #validation_steps=5,
4 validation_data = val_aug_gen,
5 epochs=num_epochs,verbose=2,
6 callbacks = [earlystopping, scheduler, checkpointer])
File ~/miniforge3/envs/env_tf/lib/python3.9/site-packages/keras/utils/traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File ~/miniforge3/envs/env_tf/lib/python3.9/site-packages/tensorflow/python/eager/execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
UnknownError: Graph execution error:
2 root error(s) found.
(0) UNKNOWN: KeyError: 0
Traceback (most recent call last):
File "/Users/local/miniforge3/envs/env_tf/lib/python3.9/site-packages/pandas/core/indexes/range.py", line 391, in get_loc
return self._range.index(new_key)
ValueError: 0 is not in range
I don't know how to solve this issue. Here is my custom data generator:
class CustomVoiceAugmentation(tf.keras.utils.Sequence):
    def __init__(self, dir_data, data_df, aug_level, num_classes, shuffle):
        .......
        .......
        print(f"Found {self.data_frame.shape[0]} voices belonging to {self.num_classes} classes")

    def __len__(self):
        return (self.data_len // self.batch_size)

    def __voice_augmentation(self, signalvoice):
        .....
        .....
        .....
        return augmented_voice

    def __get_voice(self, index, batch_x, batch_y, dir_data):
        X = []
        Y = []
        for i, name in enumerate(batch_x):
            label = batch_y[i]
            pathvoice = os.path.join(self.dir_data + '/' + label + '/' + name)
            .....
            .....
            .....
        return X, Y

    def __to_melspectrogram(self, augmented_voice_signal):
        ......
        return image

    def __getitem__(self, index):
        batch_x = self.data_frame["VoiceName"][index * self.batch_size:(index + 1) * self.batch_size]
        batch_y = self.data_frame["label"][index * self.batch_size:(index + 1) * self.batch_size]
        X, Y = self.__get_voice(index, batch_x, batch_y, self.dir_data)
        ......
        ......
        ......
        return tf.convert_to_tensor(image), new_Y

    def on_epoch_end(self):
        if self.shuffle:
            self.data_frame = self.data_frame.sample(frac=1).reset_index(drop=True)
Replacing label = batch_y[i] with label = batch_y.iloc[i] in __get_voice solved the issue. After __getitem__ slices the DataFrame columns, batch_y is a pandas Series that keeps the original index labels, so for every batch after the first, label 0 is no longer in its index and the label-based lookup batch_y[i] raises KeyError: 0 (the "0 is not in range" error in the traceback). Using .iloc[i] selects by position within the batch instead.
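For illustration, here is a minimal, self-contained sketch that mimics the generator's slicing and shows why positional access is needed. The toy DataFrame, the file names, and the "dir_data" folder are made up for the example; only the switch to .iloc reflects the actual fix:

import os
import pandas as pd

# Toy frame standing in for self.data_frame, with a default RangeIndex (0..7)
data_frame = pd.DataFrame({
    "VoiceName": [f"voice_{k}.wav" for k in range(8)],
    "label": ["cat", "dog", "cat", "dog", "bird", "cat", "dog", "bird"],
})

batch_size = 4
index = 1  # second batch, as __getitem__ would receive it
batch_x = data_frame["VoiceName"][index * batch_size:(index + 1) * batch_size]
batch_y = data_frame["label"][index * batch_size:(index + 1) * batch_size]

# The slices keep the index labels 4..7, so label-based lookup fails:
# batch_y[0]  ->  KeyError: 0, matching the traceback above
for i, name in enumerate(batch_x):
    label = batch_y.iloc[i]  # positional access works for every batch
    pathvoice = os.path.join("dir_data", label, name)  # hypothetical data directory
    print(pathvoice)

With .iloc the lookup is by position within the slice, so it works for every batch regardless of how the DataFrame has been shuffled or re-indexed in on_epoch_end.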