python, tensorflow, keras, deep-learning, conv-neural-network

ValueError: The layer sequential_4 has never been called and thus has no defined input


I have built a CNN model with the following layers:

from tensorflow.keras import layers, models

def build_trainable_cnn(input_shape, num_classes):
    """
    Create a CNN model for classification and feature extraction

    Parameters:
    input_shape: shape of the input features (e.g., (500, 13) for MFCCs)
    num_classes: number of output classes

    Returns:
    model: Keras Sequential model
    """
    model = models.Sequential()
    model.add(layers.Input(shape=input_shape)) 
    
    # First convolutional block
    model.add(layers.Conv1D(64, 3, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.Dropout(0.05))
    
    # Second convolutional block
    model.add(layers.Conv1D(128, 3, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.Dropout(0.05))
    
    # Third convolutional block
    model.add(layers.Conv1D(128, 3, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.Dropout(0.05))
    
    # Fourth convolutional block
    model.add(layers.Conv1D(128, 3, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling1D(pool_size=2))
    model.add(layers.Dropout(0.05))
    
    # Flatten layer to get 1D feature vector
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dropout(0.05))
    
    # Classification head
    model.add(layers.Dense(num_classes, activation='softmax'))
    
    return model

This is how I load my data:

mfcc_data = np.load("/kaggle/working/mfcc_features.npy")  # shape: (segments, 500, 13)
logmel_data = np.load("/kaggle/working/logmel_features.npy")  # shape: (segments, 500, 26)

This is how I trained it:

NUM_CLASSES = 3
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam

model_mfcc = build_trainable_cnn(input_shape=(500, 13), num_classes=NUM_CLASSES)

model_mfcc.compile(optimizer=Adam(1e-4),
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])

model_mfcc.fit(mfcc_data, labels,
               epochs=30,
               batch_size=32)

Now I want to extract features from the Dense layer:

# Extract features from the Dense(256) layer (before final classification)
feature_extractor = models.Model(inputs=[model_mfcc.input], outputs=[model_mfcc.layers[-3].output])
# Save it
feature_extractor.save("cnn_feature_extractor_mfcc.h5")

But it's not working, and I keep getting the error in the title. I also tried the following code, but the error stays the same:

# Ensure model is built
model_mfcc.build(input_shape=(None, 500, 13))
# Call the model on dummy input to ensure it's initialized (if not already done)
_ = model_mfcc(np.zeros((1, 500, 13)))

Solution

  • Hi remotestbeach,

    You can replace the Keras Sequential API with the Functional API; the Functional API initializes model.input as soon as the model is built.

    Here is the corrected code:

    from tensorflow.keras import Model, Input
    from tensorflow.keras.layers import Conv1D, BatchNormalization, MaxPooling1D, Dropout, Flatten, Dense
    
    def build_trainable_cnn_functional(input_shape, num_classes):
        inputs = Input(shape=input_shape)

        # First convolutional block
        x = Conv1D(64, 3, padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=2)(x)
        x = Dropout(0.05)(x)

        # Second convolutional block
        x = Conv1D(128, 3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=2)(x)
        x = Dropout(0.05)(x)

        # Third convolutional block
        x = Conv1D(128, 3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=2)(x)
        x = Dropout(0.05)(x)

        # Fourth convolutional block
        x = Conv1D(128, 3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=2)(x)
        x = Dropout(0.05)(x)

        # Flatten + Dense feature layer
        x = Flatten()(x)
        feature_vector = Dense(256, activation='relu', name='feature_dense')(x)
        x = Dropout(0.05)(feature_vector)

        # Classification output
        outputs = Dense(num_classes, activation='softmax', name='classification')(x)
    
        model = Model(inputs=inputs, outputs=outputs)
        return model


    NUM_CLASSES = 3
    model_mfcc = build_trainable_cnn_functional((500, 13), num_classes=NUM_CLASSES)

    # model.input is defined for a Functional model, so the feature
    # extractor can be created directly
    feature_extractor = Model(
        inputs=model_mfcc.input,
        outputs=model_mfcc.get_layer('feature_dense').output
    )

    # Save the feature extractor
    feature_extractor.save("cnn_feature_extractor_mfcc.h5")
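
    Note that the extractor shares its weights with model_mfcc, so the classifier should be trained before features are extracted (and before the extractor is saved, if the saved file should hold the trained weights). A minimal usage sketch, assuming the mfcc_data and labels arrays from the question are already in memory:

    from tensorflow.keras.optimizers import Adam

    # Train the classifier; the feature extractor shares these weights
    model_mfcc.compile(optimizer=Adam(1e-4),
                       loss='sparse_categorical_crossentropy',
                       metrics=['accuracy'])
    model_mfcc.fit(mfcc_data, labels, epochs=30, batch_size=32)

    # Map every segment to its 256-dimensional feature vector
    features = feature_extractor.predict(mfcc_data)  # shape: (segments, 256)

    # Re-save after training so the file contains the trained weights
    feature_extractor.save("cnn_feature_extractor_mfcc.h5")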