I am creating a model based on MobileNetV2:
# UNQ_C2
# GRADED FUNCTION
def alpaca_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
    ''' Define a tf.keras model for binary classification out of the MobileNetV2 model
    Arguments:
        image_shape -- Image width and height
        data_augmentation -- data augmentation function
    Returns:
        tf.keras.model
    '''
    input_shape = image_shape + (3,)

    ### START CODE HERE

    base_model = tf.keras.applications.MobileNetV2(input_shape=None,
                                                   include_top=None, # <== Important!!!!
                                                   weights=None) # From imageNet

    # Freeze the base model by making it non trainable
    base_model.trainable = False

    # create the input layer (Same as the imageNetv2 input size)
    inputs = tf.keras.Input(shape=input_shape)
    print("inputs size: ", str(inputs.shape))

    # apply data augmentation to the inputs
    x = data_augmentation(inputs)
    print("x size: ", str(x.shape))

    # data preprocessing using the same weights the model was trained on
    x = preprocess_input(x)
    print("x size: ", str(x.shape))

    # set training to False to avoid keeping track of statistics in the batch norm layer
    x = base_model(x, training=False)

    # Add the new Binary classification layers
    # use global avg pooling to summarize the info in each channel
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    print("x size: ", str(x.shape))

    # include dropout with probability of 0.2 to avoid overfitting
    x = tf.keras.layers.Dropout(0.2)(x)
    print("x size: ", str(x.shape))

    # create a prediction layer with one neuron (as a classifier only needs one)
    prediction_layer = tf.keras.layers.Dense(2, activation='softmax')(x)
    print("prediction_layer size: ", str(prediction_layer.shape))

    ### END CODE HERE

    outputs = prediction_layer
    model = tf.keras.Model(inputs, outputs)

    return model
However, the testing script
model2 = alpaca_model(IMG_SIZE, data_augmentation)

from test_utils import summary, comparator

alpaca_summary = [['InputLayer', [(None, 160, 160, 3)], 0],
                  ['Sequential', (None, 160, 160, 3), 0],
                  ['TensorFlowOpLayer', [(None, 160, 160, 3)], 0],
                  ['TensorFlowOpLayer', [(None, 160, 160, 3)], 0],
                  ['Functional', (None, 5, 5, 1280), 2257984],
                  ['GlobalAveragePooling2D', (None, 1280), 0],
                  ['Dropout', (None, 1280), 0, 0.2],
                  ['Dense', (None, 1), 1281, 'linear']] # linear is the default activation

comparator(summary(model2), alpaca_summary)

for layer in summary(model2):
    print(layer)
gives the following error:
Test failed
Expected value
['Functional', (None, 5, 5, 1280), 2257984]
does not match the input value:
['Functional', (None, None, None, 1280), 2257984]
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-67-0346cb4bf847> in <module>
10 ['Dense', (None, 1), 1281, 'linear']] #linear is the default activation
11
---> 12 comparator(summary(model2), alpaca_summary)
13
14 for layer in summary(model2):
~/work/W2A2/test_utils.py in comparator(learner, instructor)
19 "\n\n does not match the input value: \n\n",
20 colored(f"{a}", "red"))
---> 21 raise AssertionError("Error in test")
22 print(colored("All tests passed!", "green"))
23
AssertionError: Error in test
I printed out the shape of the tensor at each step and I don't see anything wrong: the [None, 160, 160, 3] image eventually gets pooled down to [None, 1280] before it is fed into the binary classifier. I am not sure what is going on here. I am pretty new to Python and CNNs. Thank you.
Change the base model call to this. With input_shape=None, MobileNetV2 is built with dynamic spatial dimensions, so its output shape is reported as (None, None, None, 1280) instead of the expected (None, 5, 5, 1280); with weights=None it also starts from random weights rather than the pretrained ImageNet features. Pass the input shape explicitly and load the ImageNet weights:
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                               include_top=None, # <== Important!!!!
                                               weights='imagenet') # from ImageNet
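For reference, here is a minimal sketch of the fix in isolation (not the full graded function), assuming IMG_SIZE = (160, 160) as in the assignment notebook; the notebook's data_augmenter() and preprocess_input helpers are omitted here.

import tensorflow as tf

IMG_SIZE = (160, 160)              # assumed notebook value
input_shape = IMG_SIZE + (3,)      # (160, 160, 3)

base_model = tf.keras.applications.MobileNetV2(
    input_shape=input_shape,       # concrete shape -> concrete output shape
    include_top=False,             # exclude the ImageNet classifier head
    weights='imagenet')            # load the pretrained ImageNet weights
base_model.trainable = False       # freeze the pretrained layers

# With input_shape set, the spatial dimensions are known, so the base model
# reports (None, 5, 5, 1280) instead of (None, None, None, 1280), which is
# what the comparator expects for the 'Functional' layer.
print(base_model.output_shape)     # (None, 5, 5, 1280)

Separately, the expected summary in the test ends with ['Dense', (None, 1), 1281, 'linear'], which suggests the comparator will also flag Dense(2, activation='softmax') once the shape issue is resolved: the final layer should be a single unit with the default linear activation.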