Tags: python, pytorch, loss-function

Loss function error in PyTorch: how should I use the loss function?


I wrote this code:

import torch
import torch.nn as nn
import torch.nn.functional as f
import math
import matplotlib.pyplot as plt
import numpy as np
import random

class dnn(nn.Module):
    def __init__(self):
        super(dnn, self).__init__()
        self.l1 = nn.Linear(in_features=1, out_features=3)
        self.l2 = nn.Linear(in_features=3, out_features=3)
        self.l3 = nn.Linear(in_features=3, out_features=1)

    def forward(self, out):
        out = f.leaky_relu(self.l1(out))
        out = f.leaky_relu(self.l2(out))
        out = f.leaky_relu(self.l3(out))
        return out

model = dnn()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

for k in range(100):
    for i in range(30):
        outputs = model(torch.tensor([i]).type(torch.FloatTensor))
        loss = criterion(outputs, torch.tensor([math.cos(i)]).type(torch.FloatTensor))
        print(loss)
        loss.backward()
        optimizer.zero_grad()
        optimizer.step()
    print('{}%'.format(k))

x = []
y = []
p = []

for i in range(30):
    x.append(i)
    y.append(math.cos(i))
    p.append(str(model(torch.tensor([i]).type(torch.FloatTensor))).split('[')[1].split(']')[0])

plt.plot(x, p, 'g')
plt.plot(x, y, 'b')

plt.show()

However, the results predicted by this model were very strange.

https://i.sstatic.net/bs6Q2.png

So I tried a few more times and kept getting similar results. While inspecting various variables in the code to track down the problem, I found something strange.

print(loss):

tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)
tensor(-0., grad_fn=<DivBackward1>)

The loss value never changes. How do I solve this?


Solution

  • You need a loss function suited to a regression task, for example:

    criterion = nn.L1Loss()
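
    A minimal regression sketch along those lines (my own rewrite, so treat the details as one reasonable choice rather than the only fix): it drops the activation on the final layer, since the targets span [-1, 1], and it moves optimizer.zero_grad() before loss.backward(). Your original loop zeroes the gradients between backward() and step(), so step() never sees a useful gradient. The smaller learning rate is a guess:

    import math
    import torch
    import torch.nn as nn
    import torch.nn.functional as f

    class dnn(nn.Module):
        def __init__(self):
            super(dnn, self).__init__()
            self.l1 = nn.Linear(in_features=1, out_features=3)
            self.l2 = nn.Linear(in_features=3, out_features=3)
            self.l3 = nn.Linear(in_features=3, out_features=1)

        def forward(self, out):
            out = f.leaky_relu(self.l1(out))
            out = f.leaky_relu(self.l2(out))
            return self.l3(out)  # raw output: regression targets can be negative

    model = dnn()
    criterion = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    for k in range(100):
        for i in range(30):
            optimizer.zero_grad()  # reset gradients before the backward pass
            outputs = model(torch.tensor([float(i)]))
            loss = criterion(outputs, torch.tensor([math.cos(i)]))
            loss.backward()
            optimizer.step()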
    

    nn.CrossEntropyLoss() expects targets to be either class indices or per-class probabilities. This also explains your constant loss: your network has a single output, and the softmax over a single logit is always 1, so its log is 0 and the loss is identically zero (printed as tensor(-0., ...)) no matter what the weights are, which is why nothing trains.
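
    For reference, a minimal illustration of the two target forms nn.CrossEntropyLoss accepts (the shapes and values here are made up for the example; probability targets require PyTorch 1.10 or newer):

    import torch
    import torch.nn as nn

    criterion = nn.CrossEntropyLoss()
    logits = torch.randn(1, 2)  # a batch of one sample with two classes

    # target as a class index
    loss_a = criterion(logits, torch.tensor([1]))

    # target as per-class probabilities (non-negative, summing to 1)
    loss_b = criterion(logits, torch.tensor([[0.3, 0.7]]))

    If you want to keep nn.CrossEntropyLoss, you can instead change the network into a binary classification task: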

    import torch
    import torch.nn as nn
    import torch.nn.functional as f
    import math
    import matplotlib.pyplot as plt
    import numpy as np
    import random
    
    class dnn(nn.Module):
        def __init__(self):
            super(dnn, self).__init__()
            self.l1 = nn.Linear(in_features=1, out_features=3)
            self.l2 = nn.Linear(in_features=3, out_features=3)
            self.l3 = nn.Linear(in_features=3, out_features=2)
    
        def forward(self, out):
            out = f.leaky_relu(self.l1(out))
            out = f.leaky_relu(self.l2(out))
            # return raw logits: nn.CrossEntropyLoss applies log-softmax internally
            return self.l3(out)
    
    model = dnn()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    
    for k in range(100):
        for i in range(30):
            optimizer.zero_grad()  # reset gradients before the backward pass
            outputs = model(torch.tensor([i]).type(torch.FloatTensor))

            # a probability-style target should be non-negative and sum to 1;
            # math.cos(i) is negative for many i, so this target is a rough choice
            loss = criterion(outputs, torch.tensor([math.cos(i), 0]).type(torch.FloatTensor))
            loss.backward()
            optimizer.step()
        print('{}%'.format(k))
    
    x = []
    y = []
    p = []
    
    for i in range(30):
        x.append(i)
        y.append(math.cos(i))
        # read the prediction out as a Python float instead of parsing the tensor's repr
        p.append(model(torch.tensor([i]).type(torch.FloatTensor))[0].item())
    
    plt.plot(x, p, 'g')
    plt.plot(x, y, 'b')
    
    plt.show()
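
    One more note on the prediction loop: reading values out with .item() (as above) avoids parsing the tensor's printed string, and wrapping inference in torch.no_grad() skips needless gradient tracking. A small sketch:

    model.eval()
    with torch.no_grad():
        p = [model(torch.tensor([float(i)]))[0].item() for i in range(30)]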