I'm using resnet18 for grounded image segmentation, but when I pass the input to the model I get this torch error:
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
I got the weights from here:
'https://download.pytorch.org/models/resnet18-5c106cde.pth'
I assume the weights are the CUDA tensors in this mismatch (they were presumably trained on NVIDIA hardware, so why wouldn't they be?), which leaves my input tensor as the one still being processed on my CPU (R7 1600X). That's strange, because I tried to make sure everything runs on my NVIDIA GPU (RTX 3060), and the GPU is recognized (torch.cuda.is_available() == True).
Here's the code I think is relevant:
class Masking:
    def __init__(
        self,
        device_handle: Provider = 'cuda:0',  # <--------------------------
        classifier: Module = None,
        face_parser: Module = None,
        num_classes=19
    ) -> None:
        self.device = device(device_handle)
        self.classifier = classifier
        self.face_parser = face_parser
        self.num_classes = num_classes
        self.provided_image = None
        print("is cuda available: ", torch.cuda.is_available())  # True

    def startup_model(self):
        self.classifier.to(self.device)  # <-----------------------------
        self.classifier.load_state_dict(self.face_parser)
        self.classifier.eval()

    def preprocess_image(self, image_path):
        to_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        self.provided_image = Image.open(image_path)
        w, h = self.provided_image.size
        interpolated_image = self.provided_image.resize((w, h), Image.BILINEAR)
        composed_image = to_tensor(interpolated_image)
        expanded_tensor = unsqueeze(composed_image, 0)
        expanded_tensor.to(self.device)  # <--------------------------------
        # expanded_tensor.to("cuda:0")  # doesn't work either
        # expanded_tensor.cuda()  # no dice
        out = self.classifier(expanded_tensor)[0]  # <<<<< ERROR
        return out

masker = Masking(
    'cuda:0',
    BiSeNet(n_classes=19),
    load('path/to/my/models/79999_iter.pth', device('cuda:0'))  # <---------------------
)
...then I run the methods defined above, etc.
Unlike nn.Module.to, torch.Tensor.to is not an in-place operation: it returns a new tensor on the target device and leaves the original tensor where it was. So you must assign the result back to a variable:
expanded_tensor = expanded_tensor.to(self.device)
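To illustrate the difference, here is a minimal, self-contained sketch; nn.Linear and the tensor shapes are just placeholders standing in for BiSeNet and the preprocessed image batch:

import torch
import torch.nn as nn

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model = nn.Linear(4, 2)   # placeholder model
model.to(device)          # nn.Module.to mutates the module: its parameters move to `device`

x = torch.randn(1, 4)     # placeholder input batch
x.to(device)              # returns a tensor on `device` but does NOT modify x
print(x.device)           # still cpu -> passing this to a CUDA model raises the device-mismatch error

x = x.to(device)          # assign the result back
print(x.device)           # cuda:0 (or cpu if no GPU is available)
print(model(x).shape)     # devices now match, so the forward pass succeeds

Applied to the code in the question, that means changing the marked line in preprocess_image to the assignment above. The self.classifier.to(self.device) call in startup_model is fine as written, because nn.Module.to moves the module's parameters in place.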