python, machine-learning, deep-learning, pytorch, dropout

Implementing dropout from scratch


This code attempts to use a custom implementation of dropout:

%reset -f

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import numpy as np
import matplotlib.pyplot as plt

num_epochs = 1000

number_samples = 10

from sklearn.datasets import make_moons
from matplotlib import pyplot
from pandas import DataFrame
# generate 2d classification dataset
X, y = make_moons(n_samples=number_samples, noise=0.1)
# scatter plot, dots colored by class value

# pair each sample with its index so labels can be looked up below
x_data = [a for a in enumerate(X)]

# first half of the samples for training
x_data_train = x_data[:int(len(x_data) * .5)]
x_data_train = [i[1] for i in x_data_train]

y_data = [y[i[0]] for i in x_data]
y_data_train = y_data[:int(len(y_data) * .5)]

# second half (taken from the reversed list) for testing
x_test = [a[1] for a in x_data[::-1][:int(len(x_data) * .5)]]
y_test = [a for a in y_data[::-1][:int(len(y_data) * .5)]]

x = torch.tensor(x_data_train).float()
print(x)

y = torch.tensor(y_data_train).long()
print(y)

x_test = torch.tensor(x_test).float()
print(x_test)

y_test = torch.tensor(y_test).long()
print(y_test)

class Dropout(nn.Module):
    def __init__(self, p=0.5, inplace=False):
#         print(p)
        super(Dropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p
        self.inplace = inplace

    def forward(self, input):
        print(list(input.shape))
        return np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'

class MyLinear(nn.Linear):
    def __init__(self, in_feats, out_feats, drop_p, bias=True):
        super(MyLinear, self).__init__(in_feats, out_feats, bias=bias)
        self.custom_dropout = Dropout(p=drop_p)

    def forward(self, input):
        dropout_value = self.custom_dropout(self.weight)
        return F.linear(input, dropout_value, self.bias)



my_train = data_utils.TensorDataset(x, y)
train_loader = data_utils.DataLoader(my_train, batch_size=2, shuffle=True)

my_test = data_utils.TensorDataset(x_test, y_test)
test_loader = data_utils.DataLoader(my_test, batch_size=2, shuffle=True)

# Device configuration
device = 'cpu'
print(device)

# Hyper-parameters 
input_size = 2
hidden_size = 100
num_classes = 2

learning_rate = 0.0001

pred = []

# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, p):
        super(NeuralNet, self).__init__()
#         self.drop_layer = nn.Dropout(p=p)
#         self.drop_layer = MyLinear()
#         self.fc1 = MyLinear(input_size, hidden_size, p)
        self.fc1 = MyLinear(input_size, hidden_size , p) 
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)  

    def forward(self, x):
#         out = self.drop_layer(x)
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes, p=0.9).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):  
        # Move tensors to the configured device
        images = images.reshape(-1, 2).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch) % 100 == 0:
        print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))

The custom dropout is implemented as:

class Dropout(nn.Module):
    def __init__(self, p=0.5, inplace=False):
#         print(p)
        super(Dropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p
        self.inplace = inplace

    def forward(self, input):
        print(list(input.shape))
        return np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'

class MyLinear(nn.Linear):
    def __init__(self, in_feats, out_feats, drop_p, bias=True):
        super(MyLinear, self).__init__(in_feats, out_feats, bias=bias)
        self.custom_dropout = Dropout(p=drop_p)

    def forward(self, input):
        dropout_value = self.custom_dropout(self.weight)
        return F.linear(input, dropout_value, self.bias)

It seems I've implemented the dropout function incorrectly here:

np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

How should this be modified to correctly apply dropout?

These posts were useful in getting to this point:

Hinton's Dropout in 3 Lines of Python : https://iamtrask.github.io/2015/07/28/dropout/

Making a Custom Dropout Function : https://discuss.pytorch.org/t/making-a-custom-dropout-function/14053/2


Solution

  • It seems I've implemented the dropout function incorrectly?

    np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))
    

    In fact, the above implementation is known as Inverted Dropout. Inverted Dropout is how Dropout is implemented in practice in the various deep learning frameworks.

    What is inverted dropout?

    Before jumping into inverted dropout, it can be helpful to see how dropout works for a single neuron: during training, the activation a is multiplied by a mask m ~ Bernoulli(q), so the unit is kept with probability q and zeroed with probability p = 1 - q.

    Since during the training phase a neuron is kept active with probability q (= 1 - p), during the testing phase we have to emulate the behavior of the ensemble of networks used in training. To this end, the authors suggest scaling the activation by a factor of q during the test phase, so that the expected output produced during training is used as the single output required at test time (Section 10, Multiplicative Gaussian Noise). Thus, at test time the output is O_test = q * a.

    Inverted dropout is a bit different. This approach scales the activations during the training phase and leaves the test phase untouched. The scale factor is the inverse of the keep probability, 1/(1 - p) = 1/q, so during training O_train = (1/q) * m * a, while at test time O_test = a.


    Inverted dropout lets you define the model once and just change a parameter (the keep/drop probability) to run training and testing on the same model. Direct dropout, instead, forces you to modify the network during the test phase, because if you don't multiply the output by q, the neuron will produce values that are higher than those expected by the subsequent neurons (so the following neurons can saturate or explode): that's why inverted dropout is the more common implementation.
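
    To see the effect of the 1/q rescaling concretely, here is a minimal sanity-check sketch: with keep probability q, the mean of the masked-and-rescaled activations stays close to the original activation, which is why the test phase can be left untouched.

    import torch

    p = 0.4                              # drop probability
    q = 1 - p                            # keep probability
    a = torch.full((100000,), 2.0)       # many copies of a constant activation

    # training-time inverted dropout: 0/1 Bernoulli mask, rescaled by 1/q
    mask = torch.distributions.Binomial(probs=q).sample(a.size())
    train_out = a * mask / q

    print(train_out.mean())   # ~2.0, matches the raw activation
    print(a.mean())           # 2.0, used as-is at test time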

    How to implement inverted dropout in PyTorch?

    class MyDropout(nn.Module):
        def __init__(self, p: float = 0.5):
            super(MyDropout, self).__init__()
            if p < 0 or p > 1:
                raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
            self.p = p
    
        def forward(self, X):
            if self.training:
                # sample a 0/1 keep mask and rescale by 1/(1-p): inverted dropout
                binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
                return X * binomial.sample(X.size()) * (1.0/(1-self.p))
            # at evaluation time the input passes through unchanged
            return X
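
    Used inside a network, MyDropout behaves like nn.Dropout: it is applied to the activations and is switched off automatically by model.eval(). A minimal sketch of plugging it into the NeuralNet from the question (same hyper-parameters assumed):

    class NeuralNet(nn.Module):
        def __init__(self, input_size, hidden_size, num_classes, p):
            super(NeuralNet, self).__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.drop = MyDropout(p=p)   # custom inverted dropout on the hidden activations
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(hidden_size, num_classes)

        def forward(self, x):
            out = self.relu(self.fc1(x))
            out = self.drop(out)         # active only in model.train() mode
            return self.fc2(out)

    model = NeuralNet(input_size, hidden_size, num_classes, p=0.5)
    model.train()   # dropout masks are sampled during training
    model.eval()    # forward returns the activations unchanged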
    

    How to implement it in NumPy?

    import numpy as np
    
    pKeep = 0.8
    weights = np.ones([1, 5])
    binary_value = np.random.rand(weights.shape[0], weights.shape[1]) < pKeep
    res = np.multiply(weights, binary_value)
    res /= pKeep  # this line is called inverted dropout technique
    print(res)
    

    How to implement it in TensorFlow?

    import tensorflow as tf
    tf.enable_eager_execution()
    
    weights = tf.ones(shape=[1, 5])
    keep_prob = 0.8
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(weights.shape)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = tf.floor(random_tensor)
    ret = tf.div(weights, keep_prob) * binary_tensor
    print(ret)