pythonmachine-learningartificial-intelligence

Need help verifying data provided by google/machine-learning course


In the gradient descent page on the intro to machine-learning course provided by google provided are features and corresponding labels, MSE loss function, initial datasets, and results. I am having difficulty verifying the results that they have and I am wondering if anyone can assist in confirming whether or not I am making a mistake or they are.

I have the following:

import pandas as pd
import numpy as np

data = [3.5, 18], [3.69, 15], [3.44, 18], [3.43, 16], [4.34, 15], [4.42, 14], [2.37, 24]
initial_data_df = pd.DataFrame(data,columns=['pounds','mpg'])

number_of_iterations = 6
weight = 0 # initialize weights
bias = 0 # initialize weights
weight_slope = 0
bias_slope = 0
final_results_df = pd.DataFrame()
learning_rate = 0.01

for i in range(number_of_iterations):
    loss = calculate_loss(initial_data_df,weight,bias)
    final_results_df = update_results(final_results_df,weight,bias,loss)
    weight_slope = find_weight_slope(initial_data_df,weight,bias)
    bias_slope = find_bias_slope(initial_data_df,weight,bias)
    weight = new_weight_update(weight,learning_rate,weight_slope)
    bias = new_bias_update(bias,learning_rate,bias_slope)
print(final_results_df)

def calculate_loss(df,weight,bias):
    loss_summation = []
    for i in range(0,len(df)):
        loss_summation.append((df['mpg'][i]-((weight*df['pounds'][i])+bias))**2)
    return (sum(loss_summation)//len(df))

def update_results(df,weight,bias,loss):
    if df.empty:
        df = pd.DataFrame([[weight,bias,loss]],columns=['weight','bias','loss'])
    else:
        df = pd.concat([df,pd.DataFrame([[weight,bias,loss]],columns=df.columns)])
    return df

def find_weight_slope(df,weight,bias):
    weight_update_summation = []
    for i in range(0,len(df)):
        wx_plus_b = (weight*df['pounds'][i])+bias
        wx_plus_b_minus_y = wx_plus_b-df['mpg'][i]
        weight_update_summation.append(2*(wx_plus_b_minus_y*df['pounds'][i]))
    return sum(weight_update_summation)//len(df)

def find_bias_slope(df,weight,bias):
    bias_update_summation = []
    for i in range(0,len(df)):
        wx_plus_b = (weight*df['pounds'][i])+bias
        wx_plus_b_minus_y = wx_plus_b-df['mpg'][i]
        bias_update_summation.append(2*wx_plus_b_minus_y)
    total_sum = sum(bias_update_summation)
    return total_sum//len(df)

def new_weight_update(old_weight,lr,slope):
    return old_weight-1*lr*slope

def new_bias_update(old_bias,lr,slope):
    return old_bias-1*lr*slope

Which yields:

iter weight  bias   loss
0    0.00    0.00   303.0
0    1.20    0.35   170.0
0    2.06    0.60   102.0
0    2.67    0.79   67.0
0    3.10    0.93   50.0
0    3.41    1.04   41.0

This differs from the provided solution provided on the website:

Iteration   Weight  Bias    Loss (MSE)
1           0       0       303.71
2           1.2     0.34    170.67
3           2.75    0.59    67.3
4           3.17    0.72    50.63
5           3.47    0.82    42.1
6           3.68    0.9     37.74

Solution

  • So I made some minor adjustments to my script based on the feedback from the AI chat bot provided by google. See below:

    import pandas as pd
    import numpy as np
    
    def main():
        data = [3.5, 18], [3.69, 15], [3.44, 18], [3.43, 16], [4.34, 15], [4.42, 14], [2.37, 24]
        initial_data_df = pd.DataFrame(data,columns=['pounds','mpg'])
    
        number_of_iterations = 6
        weight = 0 # initialize weights
        bias = 0 # initialize weights
        weight_slope = 0
        bias_slope = 0
        final_results_df = pd.DataFrame()
        learning_rate = 0.01
    
        for i in range(number_of_iterations):
            loss = round(calculate_loss(initial_data_df,weight,bias),2)
            final_results_df = update_results(final_results_df,weight,bias,loss)
            weight_slope = find_weight_slope(initial_data_df,weight,bias)
            bias_slope = find_bias_slope(initial_data_df,weight,bias)
            weight = round(new_weight_update(weight,learning_rate,weight_slope),2)
            bias = round(new_bias_update(bias,learning_rate,bias_slope),2)
        print(final_results_df)
    
    def calculate_loss(df,weight,bias):
        loss_summation = []
        for i in range(0,len(df)):
            loss_summation.append((df['mpg'][i]-((weight*df['pounds'][i])+bias))**2)
        return (sum(loss_summation)/len(df))
    
    def update_results(df,weight,bias,loss):
        if df.empty:
            df = pd.DataFrame([[weight,bias,loss]],columns=['weight','bias','loss'])
        else:
            df = pd.concat([df,pd.DataFrame([[weight,bias,loss]],columns=df.columns)])
        return df
    
    def find_weight_slope(df,weight,bias):
        slope = []
        for i in range(0,len(df)):
            predicted_mpg = (weight*df['pounds'][i])+bias   #   predicted_mpg = (weight * car_weight) + bias
            error = predicted_mpg-df['mpg'][i]
            slope.append(error*2*df['pounds'][i])
        return sum(slope)/len(df)
    
    def find_bias_slope(df,weight,bias):
        slope = []
        for i in range(0,len(df)):
            predicted_mpg = (weight*df['pounds'][i])+bias   #   predicted_mpg = (weight * car_weight) + bias
            error = predicted_mpg-df['mpg'][i]
            slope.append(2*error)
        return sum(slope)/len(df)
    
    def new_weight_update(old_weight,lr,slope):
        return old_weight-(lr*slope)
    
    def new_bias_update(old_bias,lr,slope):
        return old_bias-(lr*slope)
    
    if __name__=='__main__':
        main()
    

    With these changes I get the following results:

       weight  bias    loss
    0    0.00  0.00  303.71
    0    1.20  0.34  170.67
    0    2.05  0.59  103.22
    0    2.66  0.77   68.66
    0    3.09  0.91   51.13
    0    3.40  1.01   42.11
    

    I provided the calculated weighted slope to get the next updated weight which should be the third iteration weight value.

    enter image description here

    With these results and documentation I would conclude that google's example results are incorrect for gradient descent.