I can calculate the overall accuracy after each epoch with the code below, but I want to compute the accuracy for each class at the end of training. How can I do that? I have two folders, train and val; each contains 7 subfolders, one per class. The train folder is used for training and the val folder for validation.
def train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25):
    since = time.time()
    best_model = model
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                mode = 'train'
                optimizer = lr_scheduler(optimizer, epoch)
                model.train()  # Set model to training mode
            else:
                model.eval()
                mode = 'val'
            running_loss = 0.0
            running_corrects = 0
            counter = 0
            # Iterate over data.
            for data in dset_loaders[phase]:
                inputs, labels = data
                print(inputs.size())
                # wrap them in Variable
                if use_gpu:
                    try:
                        inputs, labels = Variable(inputs.float().cuda()), \
                                         Variable(labels.long().cuda())
                    except:
                        print(inputs, labels)
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                # Zero the gradients to discard history from the previous iteration;
                # operations are tracked so differentiation can be done automatically.
                optimizer.zero_grad()
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # print('loss done')
                # Progress indicator so you can tell the program is still running.
                # if counter % 10 == 0:
                #     print("Reached iteration ", counter)
                counter += 1
                # backward + optimize only if in training phase
                if phase == 'train':
                    # print('loss backward')
                    loss.backward()
                    # print('done loss backward')
                    optimizer.step()
                    # print('done optim')
                # print evaluation statistics
                try:
                    # running_loss += loss.data[0]
                    running_loss += loss.item()
                    # print(labels.data)
                    # print(preds)
                    running_corrects += torch.sum(preds == labels.data)
                    # print('running correct =', running_corrects)
                except:
                    print('unexpected error, could not calculate loss or do a sum.')
            print('trying epoch loss')
            epoch_loss = running_loss / dset_sizes[phase]
            epoch_acc = running_corrects.item() / float(dset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'val':
                if USE_TENSORBOARD:
                    foo.add_scalar_value('epoch_loss', epoch_loss, step=epoch)
                    foo.add_scalar_value('epoch_acc', epoch_acc, step=epoch)
                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model = copy.deepcopy(model)
                    print('new best accuracy = ', best_acc)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    print('returning and looping back')
    return best_model


def exp_lr_scheduler(optimizer, epoch, init_lr=BASE_LR, lr_decay_epoch=EPOCH_DECAY):
    """Decay learning rate by a factor of DECAY_WEIGHT every lr_decay_epoch epochs."""
    lr = init_lr * (DECAY_WEIGHT ** (epoch // lr_decay_epoch))
    if epoch % lr_decay_epoch == 0:
        print('LR is set to {}'.format(lr))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer
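For reference, the code above relies on dset_loaders and dset_sizes being defined elsewhere. They could be built with torchvision's ImageFolder along these lines (a sketch only; DATA_DIR, the transforms, and the batch size are placeholders, not taken from the original code):

import os
import torch
from torchvision import datasets, transforms

DATA_DIR = 'path/to/data'  # placeholder: directory containing the train/ and val/ folders

data_transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])

# One ImageFolder dataset per phase; each class subfolder becomes one class index.
dsets = {x: datasets.ImageFolder(os.path.join(DATA_DIR, x), data_transform)
         for x in ['train', 'val']}
dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=32,
                                               shuffle=(x == 'train'))
                for x in ['train', 'val']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
class_names = dsets['train'].classes  # the 7 folder names, in sorted order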
Calculating overall accuracy is rather straightforward:
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
acc_all = (preds == labels).float().mean()
Calculating it per class requires a few more lines of code:

acc = [0 for c in list_of_classes]
for c in list_of_classes:
    acc[c] = ((preds == labels) * (labels == c)).float().sum() / max((labels == c).sum().item(), 1)
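The snippet above gives per-class accuracy for a single batch of preds and labels. To get per-class accuracy over the whole val set at the end of training, you can accumulate per-class counts while iterating over the validation loader. Below is a minimal sketch under your setup (7 classes, a dset_loaders['val'] loader, a use_gpu flag); the function name and the class_correct/class_total counters are just illustrative, and torch.no_grad() assumes PyTorch 0.4 or later:

import torch

def per_class_accuracy(model, loader, num_classes=7, use_gpu=False):
    """Accumulate correct/total counts per class over an entire dataloader."""
    class_correct = [0] * num_classes
    class_total = [0] * num_classes
    model.eval()
    with torch.no_grad():
        for inputs, labels in loader:
            if use_gpu:
                inputs, labels = inputs.float().cuda(), labels.long().cuda()
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for c in range(num_classes):
                mask = (labels == c)
                class_correct[c] += (preds[mask] == c).sum().item()
                class_total[c] += mask.sum().item()
    for c in range(num_classes):
        total = max(class_total[c], 1)  # guard against a class with no val samples
        print('Accuracy of class {}: {:.4f}'.format(c, class_correct[c] / total))

# For example, after training:
# best_model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=25)
# per_class_accuracy(best_model, dset_loaders['val'], num_classes=7, use_gpu=use_gpu)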