python machine-learning initialization gradient-descent convergence

Initial value error while using gradient descent algorithm


Problem: The initial value is 10000, and the result stays near 10000 instead of reaching the actual solution x = 1.

import numpy.linalg as nl
x_ini=10000

def obj(x):
    f = x**2 - 2*x + 3
    return f

def grad(x):
    df = 2*x - 2
    return df

n_iter=1000
lr=0.001

x_old = x_ini


for _ in range(True):    # BUG: True == 1, so this loop body runs only once
    
    x_new = x_old - lr*(grad(x_old))
    x_old = x_new
    
    if(nl.norm(grad(x_old))<=1e-03):
        break
    x_new = x_old
    
print(x_new)
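
Before looking at the fix, note why the loop above exits almost immediately: in Python, True is an int equal to 1, so range(True) is the same as range(1), and the update runs exactly once. A quick illustrative check (not part of the original code):

print(list(range(True)))            # [0] -- a single iteration
x = 10000 - 0.001*(2*10000 - 2)     # the one update that actually happens
print(x)                            # 9980.002, still close to the start point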

Solution

  • You can change for to while. Because range(True) is equivalent to range(1), your loop body executes exactly once and then stops, long before the gradient is small. With while True the update keeps repeating until the stopping criterion on the gradient fires:

        x_old = x_ini
        while True:
            x_new = x_old - lr*grad(x_old)   # gradient descent update
            x_old = x_new
            y = nl.norm(grad(x_old))         # size of the gradient at the new point
            if y <= 1e-03:                   # stop once the gradient is (almost) zero
                break

        print(x_new)                         # ~1.0, the minimizer of x**2 - 2*x + 3

    (The trailing x_new = x_old from your loop is redundant, since the two are already equal at that point.) One more caveat: with lr = 0.001 every step shrinks the distance to the minimizer by a factor of 1 - 2*lr = 0.998, so reaching the 1e-03 tolerance from x = 10000 takes roughly 8,400 iterations. Even with the loop bug fixed, for _ in range(n_iter) with n_iter = 1000 would stop well short of x = 1.
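
If you prefer to keep a for loop, you can use an iteration cap and break early once the gradient is small enough. Below is a minimal self-contained sketch; the max_iter value of 20000 is an assumed safeguard chosen to cover the ~8,400 steps this problem needs, not a value from the original post:

    import numpy.linalg as nl

    x_ini = 10000
    lr = 0.001
    tol = 1e-03
    max_iter = 20000              # assumed cap; ~8,400 steps are needed from x = 10000

    def grad(x):
        return 2*x - 2            # derivative of x**2 - 2*x + 3

    x = x_ini
    for _ in range(max_iter):     # bounded loop instead of while True
        x = x - lr*grad(x)        # gradient descent update
        if nl.norm(grad(x)) <= tol:   # same stopping criterion as above
            break

    print(x)                      # ~1.0005, within tolerance of the true minimizer 1

The cap also guards against a loop that never terminates: for lr >= 1 on this objective the iterates oscillate or diverge, and a bare while True would spin forever.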