This problem is about implementing AND, OR, and NAND logic gates in Python using the Stochastic Gradient Descent (SGD) algorithm and the concept of a perceptron. The question is how to write a valid custom function called SGD. Here is the whole code:
import numpy as np

class perceptron:
    def __init__(self, w):
        self.w = w  # weight vector [b, w1, w2], bias first

    def output(self, x):
        y_tmp = np.dot(self.w, np.append([1], x))  # prepend 1 so the bias lines up
        return 1.0 * (y_tmp > 0)

# Your function goes here
x_list = [[0, 0], [0, 1], [1, 0], [1, 1]]
t_and = [-1, -1, -1, 1]   # AND: only [1, 1] maps to 1
t_or = [-1, 1, 1, 1]      # OR: everything except [0, 0] maps to 1
t_nand = [1, 1, 1, -1]    # NAND: only [1, 1] maps to -1
w_init = [0, 0, 0]        # initial weights [b, w1, w2]
and_gate = perceptron(w_init)
SGD(and_gate, x_list, t_and)
or_gate = perceptron(w_init)
SGD(or_gate, x_list, t_or)
nand_gate = perceptron(w_init)
SGD(nand_gate, x_list, t_nand)
print('=== AND gate ===')
for x in x_list:
    print(x, '=>', and_gate.output(x))
print('=== OR gate ===')
for x in x_list:
    print(x, '=>', or_gate.output(x))
print('=== NAND gate ===')
for x in x_list:
    print(x, '=>', nand_gate.output(x))
What I have tried: since the algorithm needs some number of epochs and a learning rate, both taken from the perceptron update rule, I put the following code where the function should go and ran it. (figure: Stochastic Gradient Descent mathematical logic)
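As best I can reconstruct that figure (an assumption on my part: the standard perceptron update, with targets t in {-1, 1} and a bias input prepended to x), the rule is

    w \leftarrow w + \eta \, (t - y) \, \tilde{x}, \qquad \tilde{x} = [1, x_1, x_2]

where y is the perceptron output and \eta is the learning rate.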
def SGD(gate, x_list, t):
    eta = 0.001
    for epoch in range(100):
        for i, x in enumerate(x_list):
            gate.w[1:] += eta * (t[i] - gate.output(x)) * t[i]
            gate.w[0] += eta * (t[i] - gate.output(x))
It made some progress, but the results don't match how the AND, OR, and NAND gates should behave:
=== AND gate ===
[0, 0] => 0.0
[0, 1] => 1.0
[1, 0] => 1.0
[1, 1] => 1.0
=== OR gate ===
[0, 0] => 0.0
[0, 1] => 1.0
[1, 0] => 1.0
[1, 1] => 1.0
=== NAND gate ===
[0, 0] => 0.0
[0, 1] => 1.0
[1, 0] => 1.0
[1, 1] => 1.0
Should I add if statements to handle each of the AND, OR, and NAND gates separately? Feel free to ask for clarification, or advise me. I hope you have a good day.
Two things go wrong in your attempt. The update multiplies by t[i] where the perceptron rule multiplies by the input vector x, and all three gates share the very same w_init list, so each SGD call keeps training the same weights (that is why your three gates print identical outputs). The classic perceptron rule also updates only on misclassified samples, i.e. when y * t[i] <= 0. Try this.
import numpy as np

class Perceptron:
    def __init__(self, w):
        self.w = w

    def output(self, x):
        y_tmp = np.dot(self.w, np.append([1], x))
        return 1.0 * (y_tmp > 0)

def SGD(gate, x_list, t, learning_rate=0.01, epochs=100):
    for epoch in range(epochs):
        for i in range(len(x_list)):
            x = np.append([1], x_list[i])  # prepend the bias input
            y = np.dot(gate.w, x)
            if y * t[i] <= 0:  # update only when the sample is misclassified
                gate.w += learning_rate * t[i] * x
x_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
t_and = [-1, -1, -1, 1]
t_or = [-1, 1, 1, 1]
t_nand = [1, 1, 1, -1]
w_init = np.array([0.0, 0.0, 0.0])
and_gate = Perceptron(w_init.copy())
SGD(and_gate, x_list, t_and)
or_gate = Perceptron(w_init.copy())
SGD(or_gate, x_list, t_or)
nand_gate = Perceptron(w_init.copy())
SGD(nand_gate, x_list, t_nand)
print('=== AND gate ===')
for x in x_list:
    print(x, '->', and_gate.output(x))
print('=== OR gate ===')
for x in x_list:
    print(x, '->', or_gate.output(x))
print('=== NAND gate ===')
for x in x_list:
    print(x, '->', nand_gate.output(x))
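With these fixes all three gates should converge well within 100 epochs, since each truth table is linearly separable. If you want to verify that programmatically, here is a quick sanity check (a sketch reusing the gates and target lists defined above; note that output() returns 0.0/1.0 while the targets are encoded as -1/1):

# Sanity check: each gate's output should match its truth table.
for gate, t in [(and_gate, t_and), (or_gate, t_or), (nand_gate, t_nand)]:
    for x, target in zip(x_list, t):
        assert gate.output(x) == (1.0 if target > 0 else 0.0)
print('All gates match their truth tables.')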
Hope this helps.