I'm trying to build a DCNN (deep convolutional neural network), but I got this error:
ValueError: ('The specified size contains a dimension with value <= 0', (-192, 1024))
I really have no idea what is causing this error. Here is my code:
The data:
import numpy as np

# Load the MFCC features and phoneme labels from binary files
c_X = open("C:/Users/PC/Desktop/Notebooks/Isabelle/mfcc_train_I_C_I_C_2.dat", "r")
c_y = open("C:/Users/PC/Desktop/Notebooks/Isabelle/phoneme_train_I_C_I_C_2.dat", "r")
c_X = np.fromfile(c_X, np.dtype('float32'))
c_y = np.fromfile(c_y, np.dtype('int8'))

# Reshape the features to (samples, channels, height, width)
c_X = c_X.reshape(886887, 1120)
c_X = c_X.reshape(c_X.shape[0], 1, 20, 56)

# one_hot and apendice are helper functions I defined elsewhere;
# Colere_X / Colere_y are arrays loaded earlier in the notebook
c_y = one_hot(c_y)
#c_y = np.append(c_y, np.zeros((374975,1)), axis=1)
X_3 = apendice(Colere_X, c_X)
y_3 = apendice(Colere_y, c_y)
#print(c_X.shape, c_y.shape)
print(X_3.shape, y_3.shape)
(1123867, 1, 20, 56) (1123867, 38)
This is my neural network implementation (I think the problem is here):
model = Sequential()
model.add(Conv2D(32, (3, 3), border_mode='valid', activation='relu',input_shape=(1, 20, 56)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), border_mode='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), border_mode='valid', activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
start = time.time()
model_info = model.fit(X_3, y_3, batch_size=100, \
epochs=20, verbose=2, validation_data=(X_test, y_test))
end = time.time()
Here is the model summary:
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_21 (Conv2D)           (None, -1, 18, 32)        16160
_________________________________________________________________
dropout_16 (Dropout)         (None, -1, 18, 32)        0
_________________________________________________________________
conv2d_22 (Conv2D)           (None, -3, 16, 32)        9248
_________________________________________________________________
max_pooling2d_11 (MaxPooling (None, -2, 8, 32)         0
_________________________________________________________________
dropout_17 (Dropout)         (None, -2, 8, 32)         0
_________________________________________________________________
conv2d_23 (Conv2D)           (None, -4, 6, 32)         9248
_________________________________________________________________
conv2d_24 (Conv2D)           (None, -6, 4, 32)         9248
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, -3, 2, 32)         0
_________________________________________________________________
dropout_18 (Dropout)         (None, -3, 2, 32)         0
_________________________________________________________________
flatten_6 (Flatten)          (None, -192)              0
=================================================================
Total params: 43,904
Trainable params: 43,904
Non-trainable params: 0
_________________________________________________________________
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-589407073ff5> in <module>()
13 model.add(Flatten())
14 model.summary()
---> 15 model.add(Dense(256, activation='relu'))
16 model.add(Dropout(0.5))
17 model.add(Dense(num_classes, activation='softmax'))
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\models.py in add(self, layer)
467 output_shapes=[self.outputs[0]._keras_shape])
468 else:
--> 469 output_tensor = layer(self.outputs[0])
470 if isinstance(output_tensor, list):
471 raise TypeError('All layers in a Sequential model '
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\engine\topology.py in __call__(self, inputs, **kwargs)
567 '`layer.build(batch_input_shape)`')
568 if len(input_shapes) == 1:
--> 569 self.build(input_shapes[0])
570 else:
571 self.build(input_shapes)
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\layers\core.py in build(self, input_shape)
823 name='kernel',
824 regularizer=self.kernel_regularizer,
--> 825 constraint=self.kernel_constraint)
826 if self.use_bias:
827 self.bias = self.add_weight(shape=(self.units,),
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
85 warnings.warn('Update your `' + object_name +
86 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 87 return func(*args, **kwargs)
88 wrapper._original_function = func
89 return wrapper
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\engine\topology.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint)
389 if dtype is None:
390 dtype = K.floatx()
--> 391 weight = K.variable(initializer(shape), dtype=dtype, name=name)
392 if regularizer is not None:
393 self.add_loss(regularizer(weight))
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\initializers.py in __call__(self, shape, dtype)
206 limit = np.sqrt(3. * scale)
207 return K.random_uniform(shape, -limit, limit,
--> 208 dtype=dtype, seed=self.seed)
209
210 def get_config(self):
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\keras\backend\theano_backend.py in random_uniform(shape, minval, maxval, dtype, seed)
2189 seed = np.random.randint(1, 10e6)
2190 rng = RandomStreams(seed=seed)
-> 2191 return rng.uniform(shape, low=minval, high=maxval, dtype=dtype)
2192
2193
~\Anaconda3\envs\tensorflow-gpu\lib\site-packages\theano\sandbox\rng_mrg.py in uniform(self, size, low, high, ndim, dtype, nstreams)
854 raise ValueError(
855 "The specified size contains a dimension with value <= 0",
--> 856 size)
857
858 else:
ValueError: ('The specified size contains a dimension with value <= 0', (-192, 256))
I would appreciate your help. Thanks in advance.
This is a problem with the channel ordering in Keras. You probably have the image_dim_ordering or image_data_format parameter in ~/.keras/keras.json set to tf or channels_last, depending on which version of Keras you are using. The problem is that the input_shape you provided, (1, 20, 56), is in th / channels_first order, so the dimensions are interpreted the wrong way around: Keras treats your input as a 1-pixel-high, 20-pixel-wide image with 56 channels, and each 3x3 "valid" convolution or pooling shrinks that height of 1 further into negative values (1 - 2 = -1, and so on), which is exactly what the summary shows. This would happen if you first used Keras with TensorFlow and then switched to Theano.
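If you want to double-check which ordering is active, you can print it straight from the backend (on Keras 1 the equivalent call is K.image_dim_ordering()):

from keras import backend as K

# Prints 'channels_last' (the tf ordering) or 'channels_first' (the th ordering),
# taken from ~/.keras/keras.json unless it was changed at runtime
print(K.image_data_format())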
The solution is quite easy. One option is to set the corresponding parameter in the keras.json config file to th or channels_first so that it matches your data.
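A sample ~/.keras/keras.json for a Keras 2 install on the Theano backend might look like this (the exact fields depend on your version; on Keras 1 the entry is "image_dim_ordering": "th" instead):

{
    "image_data_format": "channels_first",
    "backend": "theano",
    "floatx": "float32",
    "epsilon": 1e-07
}

If you prefer not to edit the file, calling keras.backend.set_image_data_format('channels_first') at the top of your script has the same effect for that run.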
The other option is to reshape your data and change the input_shape to the channels-last order, i.e. (20, 56, 1). Both solutions should work, and you should prefer the native channel ordering of the backend you are using, as it is slightly faster.
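Here is a minimal sketch of the second option, assuming c_X is the (886887, 1120) float array from your loading code (a dummy array stands in for it below):

import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D

# Dummy stand-in for the loaded MFCC features, shape (samples, 1120)
c_X = np.zeros((100, 1120), dtype='float32')

# Put the single channel axis last instead of first
c_X = c_X.reshape(c_X.shape[0], 20, 56, 1)      # (samples, height, width, channels)

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='valid', activation='relu',
                 input_shape=(20, 56, 1)))      # channels-last input shape
print(model.output_shape)                       # (None, 18, 54, 32) -- all positive

With channels_last active, the first convolution now sees a 20x56 image with a single channel, so every spatial dimension stays positive and the Dense layer after Flatten builds without the ValueError.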