python · machine-learning · neural-network · conv-neural-network

TypeError: '_IncompatibleKeys' object is not callable


I am training a CNN for a multilabel classification problem and have saved my model with torch.save(model.state_dict(), "model.pt"). For some reason, when I test the model with a custom function predict(x) that takes an array of images as input, I get the following error: TypeError: '_IncompatibleKeys' object is not callable. The traceback points to the last line of the code below: y_test_pred = model(images_tensors). Any idea what the issue could be here?

import numpy as np
import cv2
import torch
from torch import nn
import torch.nn.functional as F
import os


class Net(nn.Module):
    def __init__(self, classes_number):
        super().__init__()
        self.ConvLayer1 = nn.Sequential(
            nn.Conv2d(1, 8, 5),  # inp (1, 512, 512)
            nn.MaxPool2d(2),
            nn.ReLU()  # op (8, 254, 254)
        )
        self.ConvLayer2 = nn.Sequential(
            nn.Conv2d(8, 16, 3),  # inp (8, 254, 254)
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.BatchNorm2d(16)  # op (16, 126, 126)
        )
        self.ConvLayer3 = nn.Sequential(
            nn.Conv2d(16, 32, 3),  # inp (16, 126, 126)
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.BatchNorm2d(32)  # op (32, 62, 62)
        )
        self.ConvLayer4 = nn.Sequential(
            nn.Conv2d(32, 64, 3),  # inp (32, 62, 62)
            nn.MaxPool2d(2),
            nn.ReLU()  # op (64, 30, 30)
        )
        self.Lin1 = nn.Linear(30 * 30 * 64, 1500)
        self.drop = nn.Dropout(0.5)
        self.Lin2 = nn.Linear(1500, 150)
        self.drop = nn.Dropout(0.3)
        self.Lin3 = nn.Linear(150, classes_number)

    def forward(self, x):
        x = self.ConvLayer1(x)
        x = self.ConvLayer2(x)
        x = self.ConvLayer3(x)
        x = self.ConvLayer4(x)
        x = x.view(x.size(0), -1)
        x = F.relu(self.Lin1(x))
        x = self.drop(x)
        x = F.relu(self.Lin2(x))
        x = self.drop(x)
        x = self.Lin3(x)
        out = torch.sigmoid(x)
        return out


def predict(x):
    # On the exam, x will be a list of all the paths to the images of our held-out set
    images = []
    for img_path in x:
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Turn into greyscale
        img = cv2.resize(img, (512, 512))
        images.append(img)
    images = np.array(images)
    images = images.reshape(len(images), 1, images.shape[1], images.shape[1])  # converting (n, 512, 512) -> (n, 1, 512, 512)
    images_tensors = torch.FloatTensor(np.array(images))
    images_tensors = images_tensors.to(device)
    classes = ["red blood cell", "difficult", "gametocyte", "trophozoite", "ring", "schizont", "leukocyte"]
    model = Net(len(classes))
    model = model.load_state_dict(torch.load('model.pt'))


    y_test_pred = model(images_tensors)
    y_test_pred[y_test_pred > 0.49] = 1
    y_test_pred[y_test_pred < 0.5] = 0

    return y_test_pred.cpu().detach()

Solution

  • The buggy line is model = model.load_state_dict(torch.load('model.pt')). According to the docs, load_state_dict returns a NamedTuple with missing_keys and unexpected_keys fields, not a model object. Your code assigns that NamedTuple to the model variable, so when you call model(images_tensors) on the next line you are actually calling the NamedTuple, which raises the TypeError.

    Instead, according to the saving and loading models docs, you should load the weights into an existing model instance and keep using that instance, something like this (a fuller sketch applied to your predict function follows after the snippet):

    model = Net(len(classes))
    model.load_state_dict(torch.load(PATH))
    model.eval()
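
    Applied to your predict function, the end of it could look roughly like the sketch below, reusing the imports and the Net class from your post. This is only a sketch: the class list, the 0.49/0.5 rounding, and the model.pt path are taken from your code, while the device fallback and the map_location argument are assumptions, since device is never defined in the posted snippet.

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed; not defined in the snippet

    def predict(x):
        classes = ["red blood cell", "difficult", "gametocyte", "trophozoite",
                   "ring", "schizont", "leukocyte"]

        images = []
        for img_path in x:
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (512, 512))
            images.append(img)
        # (n, 512, 512) -> (n, 1, 512, 512)
        images_tensors = torch.FloatTensor(np.array(images)).unsqueeze(1).to(device)

        model = Net(len(classes)).to(device)
        # load_state_dict loads the weights in place and returns an _IncompatibleKeys
        # named tuple, so do NOT reassign its result to model.
        result = model.load_state_dict(torch.load("model.pt", map_location=device))
        print(result.missing_keys, result.unexpected_keys)  # both empty on a clean load
        model.eval()

        with torch.no_grad():  # no gradients needed for inference
            y_test_pred = model(images_tensors)

        y_test_pred[y_test_pred > 0.49] = 1
        y_test_pred[y_test_pred < 0.5] = 0
        return y_test_pred.cpu()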