karpathy/nn-zero-to-hero: Neural Networks: Zero to Hero

1. Derivative (slope)

The derivative can be approximated numerically as (f(x+h) - f(x)) / h, with h approaching 0:

h = 0.0001

# inputs
a = 2.0
b = -3.0
c = 10.0

d1 = a*b + c
c += h
d2 = a*b + c

print('d1', d1)
print('d2', d2)
print('slope', (d2 - d1)/h)
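
The same finite-difference estimate works with respect to any input. A small sketch (my addition, not from the original notes; the helper d_of is a made-up name) that nudges a and b instead of c:

h = 0.0001

def d_of(a, b, c):
    return a*b + c

d1 = d_of(2.0, -3.0, 10.0)
print('slope wrt a', (d_of(2.0 + h, -3.0, 10.0) - d1) / h)  # ~ -3.0, i.e. b
print('slope wrt b', (d_of(2.0, -3.0 + h, 10.0) - d1) / h)  # ~  2.0, i.e. a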

2. Forward pass

If we only consider the forward pass, the code is just a class wrapper: the raw numbers are wrapped in a new Value class.

import math

class Value:
    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return f"Value(data={self.data})"

    def __add__(self, other):
        return Value(self.data + other.data)

    def __mul__(self, other):
        return Value(self.data * other.data)

    def tanh(self):
        x = self.data
        t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
        return Value(t)

def main():
    a = Value(2.0)
    b = Value(-3.0)
    c = Value(10.0)
    d = a*b + c
    f = Value(-2.0)
    L = d * f
    print(L)

main()
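
Running this prints the forward result. With a = 2, b = -3, c = 10 and f = -2 we get d = 2*(-3) + 10 = 4 and L = 4*(-2) = -8, so the script should print:

Value(data=-8.0)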

To prepare for backpropagation later, we also need to record which nodes produced each node and how. We add a _prev attribute (built from the _children tuple) holding the nodes the value was computed from, and record the operation itself in _op:

import math

class Value:
    def __init__(self, data, _children=(), _op=''):
        self.data = data
        self._prev = set(_children)  # a set, for performance
        self._op = _op

    def __repr__(self):
        return f"Value(data={self.data})"

    def __add__(self, other):
        return Value(self.data + other.data, (self, other), '+')

    def __mul__(self, other):
        return Value(self.data * other.data, (self, other), '*')

    def tanh(self):
        x = self.data
        t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
        return Value(t, (self, ), 'tanh')

def main():
    a = Value(2.0)
    b = Value(-3.0)
    c = Value(10.0)
    d = a*b + c
    f = Value(-2.0)
    L = d * f
    print(L)

main()

We use graphviz to visualize the whole forward pass. To make the graph easier to read, a label field is added, and the intermediate product a*b is given its own name e:

import math

from graphviz import Digraph

# the next two functions are used as-is; they are only for visualization
def trace(root):
# builds a set of all nodes and edges in a graph
nodes, edges = set(), set()
def build(v):
if v not in nodes:
nodes.add(v)
for child in v._prev:
edges.add((child, v))
build(child)
build(root)
return nodes, edges

def draw_dot(root):
dot = Digraph(format='svg', graph_attr={'rankdir': 'LR'}) # LR = left to right

nodes, edges = trace(root)
for n in nodes:
uid = str(id(n))
# for any value in the graph, create a rectangular ('record') node for it
dot.node(name = uid, label = "{ %s | %.4f }" % (n.label, n.data), shape='record')
if n._op:
# if this value is a result of some operation, create an op node for it
dot.node(name = uid + n._op, label = n._op)
# and connect this node to it
dot.edge(uid + n._op, uid)

for n1, n2 in edges:
# connect n1 to the op node of n2
dot.edge(str(id(n1)), str(id(n2)) + n2._op)
dot.save('output.dot')
return dot

class Value:
    def __init__(self, data, _children=(), _op='', label=''):
        self.data = data
        self._prev = set(_children)  # a set, for performance
        self._op = _op
        self.label = label

    def __repr__(self):
        return f"Value(data={self.data})"

    def __add__(self, other):
        return Value(self.data + other.data, (self, other), '+')

    def __mul__(self, other):
        return Value(self.data * other.data, (self, other), '*')

    def tanh(self):
        x = self.data
        t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
        return Value(t, (self, ), 'tanh')

def main():
a = Value(2.0, label='a')
b = Value(-3.0, label='b')
c = Value(10.0, label='c')
e = a*b; e.label = 'e'
d = e + c; d.label = 'd'
f = Value(-2.0, label='f')
L = d * f; L.label = 'L'
draw_dot(L)
print(L)
main()
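
draw_dot saves output.dot and returns the graphviz Digraph object; in a Jupyter notebook the returned object renders inline. From a plain script you can render it to a file instead (a small sketch, assuming the standard graphviz Python API):

dot = draw_dot(L)    # e.g. inside main(), where L is in scope
dot.render('graph')  # with format='svg' set in Digraph(), this writes graph and graph.svg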

(figure: the rendered computation graph of L = (a*b + c) * f)

This gives us a computation graph with no gradients yet.

3. Adding gradients

Now let's add gradients (dL/dx, where x is the current variable). Thanks to the chain rule, we don't actually need the numerical differentiation from section 1.

(figure: the computation graph from above, repeated for reference)

For example, L's gradient is 1, since dL/dL = 1. Next come f and d: because L = d*f, dL/df = d and dL/dd = f, so f.grad = 4 and d.grad = -2. For dL/dc, the chain rule gives dL/dc = (dL/dd) * (dd/dc) = -2 * (dd/dc); since d = e + c is an addition, dd/dc = 1, so dL/dc = -2, and likewise dL/de = -2. (So for a + node, each operand simply inherits the gradient of the result; there is a small subtlety here that we come back to later.) Continuing, dL/da = (dL/de) * (de/da) = -2 * (de/da); since e = a*b, de/da = b = -3, so dL/da = 6, and similarly dL/db = (dL/de) * (de/db) = -2 * a = -4. (For a * node, an operand's gradient equals the result's gradient times the other operand.) The numerical spot-check after the table below confirms these values.

| variable | grad |
| -------- | ---- |
| a        | 6    |
| b        | -4   |
| c        | -2   |
| e        | -2   |
| f        | 4    |
| d        | -2   |
| L        | 1    |
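
A minimal numerical spot-check (my addition, reusing the finite-difference idea from section 1; the helper forward is a made-up name) that dL/da really is about 6:

h = 0.0001

def forward(a, b, c, f):
    e = a*b
    d = e + c
    return d * f

L1 = forward(2.0, -3.0, 10.0, -2.0)
L2 = forward(2.0 + h, -3.0, 10.0, -2.0)
print('dL/da ~', (L2 - L1)/h)  # approximately 6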

Following the reasoning above, we add a grad field. During the forward pass, each operation attaches to its output out a _backward closure that records how to distribute the output's gradient back to its operands. After the forward pass, we topologically sort the nodes, set the final result's grad to 1, reverse the topologically sorted list, and call each node's own _backward function to propagate the gradients backwards:

import math

from graphviz import Digraph

def trace(root):
# builds a set of all nodes and edges in a graph
nodes, edges = set(), set()
def build(v):
if v not in nodes:
nodes.add(v)
for child in v._prev:
edges.add((child, v))
build(child)
build(root)
return nodes, edges

def draw_dot(root):
dot = Digraph(format='svg', graph_attr={'rankdir': 'LR'}) # LR = left to right

nodes, edges = trace(root)
for n in nodes:
uid = str(id(n))
# for any value in the graph, create a rectangular ('record') node for it
dot.node(name = uid, label = "{ %s | %.4f | grad %.4f }" % (n.label, n.data, n.grad), shape='record')
if n._op:
# if this value is a result of some operation, create an op node for it
dot.node(name = uid + n._op, label = n._op)
# and connect this node to it
dot.edge(uid + n._op, uid)

for n1, n2 in edges:
# connect n1 to the op node of n2
dot.edge(str(id(n1)), str(id(n2)) + n2._op)
dot.save('output.dot')
return dot

class Value:
def __init__(self, data, _children=(), _op='', label=''):
self.data = data
        self._prev = set(_children)  # a set, for performance
self._op = _op
self.label = label
self.grad = 0.0
self._backward = lambda: None
def __repr__(self):
return f"Value(data={self.data})"
def __add__(self, other):
out = Value(self.data + other.data, (self, other), '+')

def _backward():
self.grad = 1.0 * out.grad
other.grad = 1.0 * out.grad
out._backward = _backward
return out

def __mul__(self, other):
out = Value(self.data * other.data, (self, other), '*')

def _backward():
self.grad = out.grad * other.data
other.grad = out.grad * self.data
out._backward = _backward
return out

def tanh(self):
x = self.data
t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
out = Value(t,(self, ), 'tanh')

def _backward():
self.grad = (1 - t**2) * out.grad
out._backward = _backward
return out

def backward(self):
        # first, topologically sort the graph into a list from leaf operands to the final result node
topo = []
visited = set()
def build_topo(v):
if v not in visited:
visited.add(v)
for child in v._prev:
build_topo(child)
topo.append(v)
build_topo(self)

        # set the final result's grad to 1, then walk the reversed list and call _backward to backpropagate
self.grad = 1.0
for node in reversed(topo):
node._backward()

def main():
a = Value(2.0, label='a')
b = Value(-3.0, label='b')
c = Value(10.0, label='c')
e = a*b; e.label = 'e'
d = e + c; d.label = 'd'
f = Value(-2.0, label='f')
L = d * f; L.label = 'L'
L.backward()
draw_dot(L)
print(L)
main()

(figure: the computation graph with data and grad shown for each node)

There is a small problem: for the two examples below, the code above produces wrong gradients:

a = Value(3.0, label='a')
b = a + a ; b.label = 'b'
b.backward()
draw_dot(b)

(figure: graph for b = a + a; a.grad shows 1.0 instead of the correct 2.0)

During backpropagation here, a's gradient is assigned twice; the second grad = 1 overwrites the first grad = 1 instead of accumulating, so a.grad ends up as 1 rather than the correct 2 (db/da = 2 for b = a + a).

a = Value(-2.0, label='a')
b = Value(3.0, label='b')
d = a * b ; d.label = 'd'
e = a + b ; e.label = 'e'
f = d * e ; f.label = 'f'

f.backward()

draw_dot(f)

(figure: graph for f = (a*b) * (a+b); the gradients of a and b are again overwritten)

Here too the gradients get overwritten. Gradient computation should accumulate, so the earlier = must become +=. While we're at it, we also add a few more basic operations (power, division, negation, subtraction, exp); a quick check of the a + a case follows the class below:

class Value:
def __init__(self, data, _children=(), _op='', label=''):
self.data = data
self.grad = 0.0
self._backward = lambda: None
self._prev = set(_children)
self._op = _op
self.label = label

def __repr__(self):
return f"Value(data={self.data})"

def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
out = Value(self.data + other.data, (self, other), '+')

def _backward():
self.grad += 1.0 * out.grad
other.grad += 1.0 * out.grad
out._backward = _backward

return out

def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
out = Value(self.data * other.data, (self, other), '*')

def _backward():
self.grad += other.data * out.grad
other.grad += self.data * out.grad
out._backward = _backward

return out

def __pow__(self, other):
assert isinstance(other, (int, float)), "only supporting int/float powers for now"
out = Value(self.data**other, (self,), f'**{other}')

def _backward():
self.grad += other * (self.data ** (other - 1)) * out.grad
out._backward = _backward

return out

def __rmul__(self, other): # other * self
return self * other

def __truediv__(self, other): # self / other
return self * other**-1

def __neg__(self): # -self
return self * -1

def __sub__(self, other): # self - other
return self + (-other)

def __radd__(self, other): # other + self
return self + other

def tanh(self):
x = self.data
t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
out = Value(t, (self, ), 'tanh')

def _backward():
self.grad += (1 - t**2) * out.grad
out._backward = _backward

return out

def exp(self):
x = self.data
out = Value(math.exp(x), (self, ), 'exp')

def _backward():
self.grad += out.data * out.grad # NOTE: in the video I incorrectly used = instead of +=. Fixed here.
out._backward = _backward

return out


def backward(self):

topo = []
visited = set()
def build_topo(v):
if v not in visited:
visited.add(v)
for child in v._prev:
build_topo(child)
topo.append(v)
build_topo(self)

self.grad = 1.0
for node in reversed(topo):
node._backward()
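
With the accumulating +=, the earlier b = a + a example now behaves correctly; a quick check (my addition, using the class above):

a = Value(3.0, label='a')
b = a + a; b.label = 'b'
b.backward()
print(a.grad)  # 2.0, since db/da = 2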

4. From a neuron to an MLP

class Neuron:

def __init__(self, nin):
self.w = [Value(random.uniform(-1,1)) for _ in range(nin)]
self.b = Value(random.uniform(-1,1))

def __call__(self, x):
act = sum((wi*xi for wi,xi in zip(self.w,x)), self.b)
out = act.tanh()
return out

def parameters(self):
return self.w + [self.b]

class Layer:
    # a layer of nout neurons, each taking nin inputs
def __init__(self, nin, nout):
self.neurons = [Neuron(nin) for _ in range(nout)]

def __call__(self, x):
outs = [n(x) for n in self.neurons]
return outs[0] if len(outs) == 1 else outs

def parameters(self):
return [p for neuron in self.neurons for p in neuron.parameters()]

class MLP:
    # multi-layer perceptron: nin is the input dimension, nouts is a list of each layer's output dimension
def __init__(self, nin, nouts):
sz = [nin] + nouts
self.layers = [Layer(sz[i],sz[i+1]) for i in range(len(nouts))]

def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x

def parameters(self):
return [p for layer in self.layers for p in layer.parameters()]
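
Before training, a quick forward-pass sketch (my addition; it assumes the Value class and import random from earlier are in scope):

n = MLP(3, [4, 4, 1])        # 3 inputs, two hidden layers of 4 neurons, 1 output
x = [2.0, 3.0, -1.0]
print(n(x))                  # a single Value in (-1, 1), because the output neuron applies tanh
print(len(n.parameters()))   # 4*(3+1) + 4*(4+1) + 1*(4+1) = 41 parameters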

5. Training the network

def main():
n = MLP(3, [4, 4, 1])

xs = [
[2.0, 3.0, -1.0],
[3.0, -1.0, 0.5],
[0.5, 1.0, 1.0],
[1.0, 1.0, -1.0],
]
ys = [1.0, -1.0, -1.0, 1.0] # desired targets

for k in range(20):

# forward pass
ypred = [n(x) for x in xs]
loss = sum((yout - ygt)**2 for ygt, yout in zip(ys, ypred))

# backward pass
for p in n.parameters():
p.grad = 0.0
loss.backward()

# update
for p in n.parameters():
p.data += -0.1 * p.grad

print(k, loss.data)
print(ypred)
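
Two details worth noting: the loss uses Python's built-in sum, which starts from 0, so 0 + Value(...) only works because of __radd__; and the parameter gradients must be reset to 0 before each loss.backward(), because the _backward closures accumulate with +=. A tiny sketch (my addition) of the __radd__ point:

vals = [Value(1.0), Value(2.0), Value(3.0)]
total = sum(vals)   # 0 + Value(1.0) dispatches to Value.__radd__, then accumulates
print(total)        # Value(data=6.0)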

6. Complete code

import math
import random

from graphviz import Digraph

def trace(root):
# builds a set of all nodes and edges in a graph
nodes, edges = set(), set()
def build(v):
if v not in nodes:
nodes.add(v)
for child in v._prev:
edges.add((child, v))
build(child)
build(root)
return nodes, edges

def draw_dot(root):
dot = Digraph(format='svg', graph_attr={'rankdir': 'LR'}) # LR = left to right

nodes, edges = trace(root)
for n in nodes:
uid = str(id(n))
# for any value in the graph, create a rectangular ('record') node for it
dot.node(name = uid, label = "{ %s | %.4f | grad %.4f }" % (n.label, n.data, n.grad), shape='record')
if n._op:
# if this value is a result of some operation, create an op node for it
dot.node(name = uid + n._op, label = n._op)
# and connect this node to it
dot.edge(uid + n._op, uid)

for n1, n2 in edges:
# connect n1 to the op node of n2
dot.edge(str(id(n1)), str(id(n2)) + n2._op)
dot.save('output.dot')
return dot

class Value:
def __init__(self, data, _children=(), _op='', label=''):
self.data = data
self.grad = 0.0
self._backward = lambda: None
self._prev = set(_children)
self._op = _op
self.label = label

def __repr__(self):
return f"Value(data={self.data})"

def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
out = Value(self.data + other.data, (self, other), '+')

def _backward():
self.grad += 1.0 * out.grad
other.grad += 1.0 * out.grad
out._backward = _backward

return out

def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
out = Value(self.data * other.data, (self, other), '*')

def _backward():
self.grad += other.data * out.grad
other.grad += self.data * out.grad
out._backward = _backward

return out

def __pow__(self, other):
assert isinstance(other, (int, float)), "only supporting int/float powers for now"
out = Value(self.data**other, (self,), f'**{other}')

def _backward():
self.grad += other * (self.data ** (other - 1)) * out.grad
out._backward = _backward

return out

def __rmul__(self, other): # other * self
return self * other

def __truediv__(self, other): # self / other
return self * other**-1

def __neg__(self): # -self
return self * -1

def __sub__(self, other): # self - other
return self + (-other)

def __radd__(self, other): # other + self
return self + other

def tanh(self):
x = self.data
t = (math.exp(2*x) - 1)/(math.exp(2*x) + 1)
out = Value(t, (self, ), 'tanh')

def _backward():
self.grad += (1 - t**2) * out.grad
out._backward = _backward

return out

def exp(self):
x = self.data
out = Value(math.exp(x), (self, ), 'exp')

def _backward():
self.grad += out.data * out.grad # NOTE: in the video I incorrectly used = instead of +=. Fixed here.
out._backward = _backward

return out


def backward(self):

topo = []
visited = set()
def build_topo(v):
if v not in visited:
visited.add(v)
for child in v._prev:
build_topo(child)
topo.append(v)
build_topo(self)

self.grad = 1.0
for node in reversed(topo):
node._backward()

class Neuron:

def __init__(self, nin):
self.w = [Value(random.uniform(-1,1)) for _ in range(nin)]
self.b = Value(random.uniform(-1,1))

def __call__(self, x):
act = sum((wi*xi for wi,xi in zip(self.w,x)), self.b)
out = act.tanh()
return out

def parameters(self):
return self.w + [self.b]

class Layer:
    # a layer of nout neurons, each taking nin inputs
def __init__(self, nin, nout):
self.neurons = [Neuron(nin) for _ in range(nout)]

def __call__(self, x):
outs = [n(x) for n in self.neurons]
return outs[0] if len(outs) == 1 else outs

def parameters(self):
return [p for neuron in self.neurons for p in neuron.parameters()]

class MLP:
    # multi-layer perceptron: nin is the input dimension, nouts is a list of each layer's output dimension
def __init__(self, nin, nouts):
sz = [nin] + nouts
self.layers = [Layer(sz[i],sz[i+1]) for i in range(len(nouts))]

def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x

def parameters(self):
return [p for layer in self.layers for p in layer.parameters()]

def main():
n = MLP(3, [4, 4, 1])

xs = [
[2.0, 3.0, -1.0],
[3.0, -1.0, 0.5],
[0.5, 1.0, 1.0],
[1.0, 1.0, -1.0],
]
ys = [1.0, -1.0, -1.0, 1.0] # desired targets

for k in range(20):

# forward pass
ypred = [n(x) for x in xs]
loss = sum((yout - ygt)**2 for ygt, yout in zip(ys, ypred))

# backward pass
for p in n.parameters():
p.grad = 0.0
loss.backward()

# update
for p in n.parameters():
p.data += -0.1 * p.grad

print(k, loss.data)
print(ypred)
main()