How to implement gradient descent in Python to find a local minimum?

Published: July 20, 2020

Tags: Python; Machine learning


Examples of how to implement gradient descent in Python to find a local minimum.
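All three examples below use the same update rule: starting from a point x, repeatedly step against the derivative, x = x - alpha * f'(x), where alpha is the learning rate. The loop stops when the change in the function value between two iterations falls below eps, or after nb_max_iter iterations.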

Gradient descent with a 1D function


from scipy import misc

import matplotlib.pyplot as plt
import numpy as np

def fonction(x):
    return 3*x*x + 2*x + 1

x = np.arange(-2.0, 2.0, 0.01)
y = fonction(x)

plt.plot(x, y,'r-')
#plt.show()

alpha = 0.1 # learning rate
nb_max_iter = 100 # maximum number of iterations
eps = 0.0001 # stop condition

x0 = 1.5 # start point
y0 = fonction(x0)
plt.scatter(x0, fonction(x0))

cond = eps + 10.0 # start with cond greater than eps (assumption)
nb_iter = 0 
tmp_y = y0
while cond > eps and nb_iter < nb_max_iter:
    x0 = x0 - alpha * misc.derivative(fonction, x0)
    y0 = fonction(x0)
    nb_iter = nb_iter + 1
    cond = abs( tmp_y - y0 )
    tmp_y = y0
    print(x0, y0, cond)
    plt.scatter(x0, y0)

plt.title("Gradient Descent Python (1d test)")
plt.grid()

plt.savefig("gradient_descent_1d_python.png", bbox_inches='tight')
plt.show()
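Note that scipy.misc.derivative has been deprecated (SciPy 1.10) and removed (SciPy 1.12). If it is not available in your SciPy version, a minimal central-difference replacement is sketched below (the step size dx=1e-6 is an arbitrary choice, not taken from the code above):

def derivative(func, x0, dx=1e-6):
    # central finite difference: (f(x0+dx) - f(x0-dx)) / (2*dx)
    return (func(x0 + dx) - func(x0 - dx)) / (2.0 * dx)

The update line then becomes x0 = x0 - alpha * derivative(fonction, x0). For the quadratic above, the central difference is exact for any dx.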

Gradient descent with a 2D function


from scipy import misc

import matplotlib.pyplot as plt
import numpy as np
import math

#----------------------------------------------------------------------------------------#
# Function

def fonction(x1, x2):
    return -1.0 * math.exp(-x1**2 - x2**2)

def partial_derivative(func, var=0, point=[]):
    # numerical partial derivative of func with respect to its var-th
    # argument, evaluated at point (central difference via scipy)
    args = point[:]
    def wraps(x):
        args[var] = x
        return func(*args)
    return misc.derivative(wraps, point[var], dx=1e-6)

#----------------------------------------------------------------------------------------#
# Plot Function

x1 = np.arange(-2.0, 2.0, 0.1)
x2 = np.arange(-2.0, 2.0, 0.1)

xx1, xx2 = np.meshgrid(x1, x2)

z = -1.0 * np.exp(-xx1**2 - xx2**2)

h = plt.contourf(x1,x2,z)
#plt.show()

#----------------------------------------------------------------------------------------#
# Gradient Descent

alpha = 0.1 # learning rate
nb_max_iter = 100 # maximum number of iterations
eps = 0.0001 # stop condition

x1_0 = 1.0 # start point
x2_0 = 1.5 
z0 = fonction(x1_0,x2_0)
plt.scatter(x1_0,x2_0)

cond = eps + 10.0 # start with cond greater than eps (assumption)
nb_iter = 0 
tmp_z0 = z0
while cond > eps and nb_iter < nb_max_iter:
    tmp_x1_0 = x1_0 - alpha * partial_derivative(fonction, 0, [x1_0,x2_0])
    tmp_x2_0 = x2_0 - alpha * partial_derivative(fonction, 1, [x1_0,x2_0])
    x1_0 = tmp_x1_0
    x2_0 = tmp_x2_0
    z0 = fonction(x1_0,x2_0)
    nb_iter = nb_iter + 1
    cond = abs( tmp_z0 - z0 )
    tmp_z0 = z0
    print(x1_0, x2_0, cond)
    plt.scatter(x1_0, x2_0)

plt.title("Gradient Descent Python (2d test)")

plt.savefig("gradiend_descent_2d_python.png", bbox_inches='tight')
plt.show()
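For this function the gradient also has a closed form, which can be used to check the numerical partial derivatives: with f(x1, x2) = -exp(-x1**2 - x2**2), we get df/dx1 = 2*x1*exp(-x1**2 - x2**2), and likewise for x2. Below is a minimal sketch of the same descent with the analytic gradient (learning rate, iteration count, and start point reused from above):

import math

def fonction(x1, x2):
    return -1.0 * math.exp(-x1**2 - x2**2)

def gradient(x1, x2):
    # analytic partials: df/dxi = 2*xi*exp(-x1**2 - x2**2)
    e = math.exp(-x1**2 - x2**2)
    return 2.0 * x1 * e, 2.0 * x2 * e

x1_0, x2_0 = 1.0, 1.5
alpha = 0.1
for _ in range(100):
    g1, g2 = gradient(x1_0, x2_0)
    x1_0, x2_0 = x1_0 - alpha * g1, x2_0 - alpha * g2

print(x1_0, x2_0) # moves toward the minimum at (0, 0)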

Gradient descent with a 3D function


from scipy import misc

import matplotlib.pyplot as plt
import numpy as np
import math

#----------------------------------------------------------------------------------------#
# Function

# center of the sphere (the minimum of the function below)
x1_sphere = 1.5
x2_sphere = 2.5
x3_sphere = 0.5

def fonction(x1,x2,x3):
    r1 = x1 - x1_sphere
    r2 = x2 - x2_sphere
    r3 = x3 - x3_sphere
    return r1*r1 + r2*r2 + r3*r3

def partial_derivative(func, var=0, point=[]):
    args = point[:]
    def wraps(x):
        args[var] = x
        return func(*args)
    return misc.derivative(wraps, point[var], dx = 1e-6)

#----------------------------------------------------------------------------------------#
# Plot Function

x1 = np.arange(0.0, 3.0, 0.1)
x2 = np.arange(0.0, 3.0, 0.1)
x3 = np.arange(0.0, 3.0, 0.1)

dim_x1 = x1.shape[0]
dim_x2 = x2.shape[0]
dim_x3 = x3.shape[0]

print(dim_x1)

z2 = np.zeros((dim_x2,dim_x1)) # contourf expects z[row=x2, col=x1]
z3 = np.zeros((dim_x3,dim_x1))

for i in np.arange(dim_x1):
    for j in np.arange(dim_x2):
        r1 = x1[i] - x1_sphere
        r2 = x2[j] - x2_sphere
        r3 = 0.0   - x3_sphere
        z2[j,i] = r1*r1 + r2*r2 + r3*r3

h = plt.contourf(x1,x2,z2)
plt.savefig("gradient_descent_3d_python_x1_x2", bbox_inches='tight')
plt.close()

for i in np.arange(dim_x1):
    for j in np.arange(dim_x3):
        r1 = x1[i] - x1_sphere
        r2 = 0.0   - x2_sphere
        r3 = x3[j] - x3_sphere
        z3[j,i] = r1*r1 + r2*r2 + r3*r3

h = plt.contourf(x1,x3,z3)
plt.savefig("gradient_descent_3d_python_x1_x3", bbox_inches='tight')

#----------------------------------------------------------------------------------------#
# Gradient Descent

alpha = 0.1 # learning rate
nb_max_iter = 100 # maximum number of iterations
eps = 0.0001 # stop condition

x1_0 = 0.0 # start point
x2_0 = 0.5 
x3_0 = 0.0 
z0 = fonction(x1_0,x2_0,x3_0)
plt.scatter(x1_0,x3_0)

cond = eps + 10.0 # start with cond greater than eps (assumption)
nb_iter = 0 
tmp_z0 = z0
while cond > eps and nb_iter < nb_max_iter:
    tmp_x1_0 = x1_0 - alpha * partial_derivative(fonction, 0, [x1_0,x2_0,x3_0])
    tmp_x2_0 = x2_0 - alpha * partial_derivative(fonction, 1, [x1_0,x2_0,x3_0])
    tmp_x3_0 = x3_0 - alpha * partial_derivative(fonction, 2, [x1_0,x2_0,x3_0])
    x1_0 = tmp_x1_0
    x2_0 = tmp_x2_0
    x3_0 = tmp_x3_0
    z0 = fonction(x1_0,x2_0,x3_0)
    nb_iter = nb_iter + 1
    cond = abs( tmp_z0 - z0 )
    tmp_z0 = z0
    print(x1_0, x2_0, x3_0, cond)
    plt.scatter(x1_0, x3_0) # plotted on the x1-x3 contour, like the start point

plt.title("Gradient Descent Python (3d test)")

plt.savefig("gradient_descent_23_python.png", bbox_inches='tight')
plt.show()
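The same pattern generalizes to any number of dimensions by storing the point in a NumPy array. Below is a minimal sketch (the numerical_gradient helper is an illustration, not part of the listing above) that reuses the sphere function, start point, and stopping rule:

import numpy as np

center = np.array([1.5, 2.5, 0.5]) # x1_sphere, x2_sphere, x3_sphere

def fonction(x):
    r = x - center
    return np.dot(r, r)

def numerical_gradient(func, x, dx=1e-6):
    # central finite difference, one coordinate at a time
    grad = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = dx
        grad[i] = (func(x + step) - func(x - step)) / (2.0 * dx)
    return grad

alpha = 0.1 # learning rate
eps = 0.0001 # stop condition
nb_max_iter = 100 # maximum number of iterations

x = np.array([0.0, 0.5, 0.0]) # start point
z_prev = fonction(x)
for _ in range(nb_max_iter):
    x = x - alpha * numerical_gradient(fonction, x)
    z = fonction(x)
    if abs(z_prev - z) < eps:
        break
    z_prev = z

print(x) # close to [1.5, 2.5, 0.5], the center of the sphere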
