
Commit 830f68a

Add some comments to the neuralnets
1 parent 2c15265 commit 830f68a


7 files changed, +35 -6 lines changed


mla/neuralnet/activations.py

+4 -1
@@ -1,7 +1,10 @@
 import autograd.numpy as np
 
+"""
+References:
+https://en.wikipedia.org/wiki/Activation_function
+"""
 
-# https://en.wikipedia.org/wiki/Activation_function
 
 def sigmoid(z):
     return 1.0 / (1.0 + np.exp(-z))
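
Note: the module imports autograd.numpy rather than plain numpy, presumably so that activations can be differentiated automatically during backpropagation. A minimal illustrative sketch (not part of this commit) of what that enables:

import autograd.numpy as np
from autograd import grad

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# autograd derives the gradient of the activation automatically
dsigmoid = grad(sigmoid)
print(sigmoid(0.0))   # 0.5
print(dsigmoid(0.0))  # 0.25, since sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))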

mla/neuralnet/initializations.py

+5 -1
@@ -1,6 +1,10 @@
 import numpy as np
 
+"""
+References:
+http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
 
+"""
 def normal(shape, scale=0.5):
     return np.random.normal(size=shape, scale=scale)
 
@@ -62,7 +66,7 @@ def he_uniform(shape, **kwargs):
 
 
 def get_initializer(name):
-    """Return initialization function by name"""
+    """Returns initialization function by the name."""
    try:
        return globals()[name]
    except:
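
The new reference is Glorot & Bengio (2010), the paper behind the 'glorot_uniform' scheme that parameters.py uses as its default initializer. A rough sketch of that scheme, assuming the module's helper follows the paper (its actual fan-in/fan-out handling may differ):

import numpy as np

def glorot_uniform(shape):
    # Sample uniformly from [-limit, limit], with
    # limit = sqrt(6 / (fan_in + fan_out)) as in Glorot & Bengio (2010).
    fan_in, fan_out = shape[0], shape[-1]
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(low=-limit, high=limit, size=shape)

W = glorot_uniform((128, 64))
print(W.shape, W.min() >= -0.177, W.max() <= 0.177)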

mla/neuralnet/loss.py

+1 -1
@@ -4,7 +4,7 @@
 
 
 def get_loss(name):
-    """Return loss function by name"""
+    """Returns loss function by the name."""
 
     try:
         return globals()[name]
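
Both get_loss and get_initializer resolve module-level functions through globals(). An illustrative sketch of that lookup-by-name pattern (mean_squared_error is only a stand-in here, not necessarily a name defined in loss.py):

import numpy as np

def mean_squared_error(actual, predicted):
    return np.mean((actual - predicted) ** 2)

def get_loss(name):
    """Returns loss function by the name."""
    try:
        return globals()[name]
    except KeyError:
        raise ValueError('Invalid loss function name: %s' % name)

loss = get_loss('mean_squared_error')
print(loss(np.array([1.0, 2.0]), np.array([1.5, 1.5])))  # 0.25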

mla/neuralnet/nnet.py

+14 -1
@@ -42,7 +42,8 @@ def __init__(self, layers, optimizer, loss, max_epochs=10, batch_size=64, random
         self.training = False
         self._initialized = False
 
-    def _setup_layers(self, x_shape, ):
+    def _setup_layers(self, x_shape):
+        """Initialize model's layers."""
         x_shape = list(x_shape)
         x_shape[0] = self.batch_size
 
@@ -55,6 +56,8 @@ def _setup_layers(self, x_shape, ):
         logging.info('Total parameters: %s' % self.n_params)
 
     def _find_bprop_entry(self):
+        """Find entry layer for back propagation."""
+
         if len(self.layers) > 0 and not hasattr(self.layers[-1], 'parameters'):
             return -1
         return len(self.layers)
@@ -73,7 +76,10 @@ def fit(self, X, y=None):
         self.is_training = False
 
     def update(self, X, y):
+        # Forward pass
         y_pred = self.fprop(X)
+
+        # Backward pass
         grad = self.loss_grad(y, y_pred)
         for layer in reversed(self.layers[:self.bprop_entry]):
             grad = layer.backward_pass(grad)
@@ -100,14 +106,18 @@ def parametric_layers(self):
 
     @property
     def parameters(self):
+        """Returns a list of all parameters."""
         params = []
         for layer in self.parametric_layers:
             params.append(layer.parameters)
         return params
 
     def error(self, X=None, y=None):
+        """Calculate an error for given examples."""
         training_phase = self.is_training
         if training_phase:
+            # Temporally disable training.
+            # Some layers work differently while training (e.g. Dropout).
             self.is_training = False
         if X is None and y is None:
             y_pred = self._predict(self.X)
@@ -131,6 +141,7 @@ def is_training(self, train):
             layer.is_training = train
 
     def shuffle_dataset(self):
+        """Shuffle rows in the dataset."""
         n_samples = self.X.shape[0]
         indices = np.arange(n_samples)
         np.random.shuffle(indices)
@@ -139,10 +150,12 @@ def shuffle_dataset(self):
 
     @property
     def n_layers(self):
+        """Returns the number of layers."""
         return self._n_layers
 
     @property
     def n_params(self):
+        """Return the number of trainable parameters."""
         return sum([layer.parameters.n_params for layer in self.parametric_layers])
 
     def reset(self):
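
The new comments in update spell out the core training step: run a forward pass to get predictions, take the gradient of the loss, then push that gradient through the layers in reverse order. A self-contained sketch of that control flow (Dense and loss_grad below are toy stand-ins, not the mla classes; only the backward_pass call pattern is taken from the diff):

import numpy as np

class Dense:
    """Toy fully connected layer, used only to illustrate the flow."""
    def __init__(self, n_in, n_out):
        self.W = np.random.randn(n_in, n_out) * 0.1

    def forward_pass(self, X):
        self.last_input = X
        return X.dot(self.W)

    def backward_pass(self, grad):
        # A real layer would also accumulate parameter gradients here;
        # this sketch only returns the gradient w.r.t. the layer's input.
        return grad.dot(self.W.T)

def loss_grad(y, y_pred):
    # Gradient of mean squared error w.r.t. the predictions
    return 2.0 * (y_pred - y) / y.shape[0]

layers = [Dense(4, 8), Dense(8, 1)]
X, y = np.random.randn(16, 4), np.random.randn(16, 1)

# Forward pass
out = X
for layer in layers:
    out = layer.forward_pass(out)

# Backward pass
grad = loss_grad(y, out)
for layer in reversed(layers):
    grad = layer.backward_pass(grad)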

mla/neuralnet/optimizers.py

+6
@@ -19,29 +19,35 @@ def optimize(self, network):
         for i in range(network.max_epochs):
             if network.shuffle:
                 network.shuffle_dataset()
+
             start_time = time.time()
             loss = self.train_epoch(network)
             loss_history.append(loss)
             msg = "Epoch:%s, train loss: %s" % (i, loss)
+
             if network.log_metric:
                 msg += ', train %s: %s' % (network.metric_name, network.error())
             msg += ', elapsed: %s sec.' % (time.time() - start_time)
             logging.info(msg)
         return loss_history
 
     def update(self, network):
+        """Performs an update of parameters."""
         raise NotImplementedError
 
     def train_epoch(self, network):
         self._setup(network)
         losses = []
 
+        # Create batch iterator
         X_batch = batch_iterator(network.X, network.batch_size)
         y_batch = batch_iterator(network.y, network.batch_size)
+
         for X, y in tqdm(zip(X_batch, y_batch), 'Epoch progress'):
             loss = np.mean(network.update(X, y))
             self.update(network)
             losses.append(loss)
+
         epoch_loss = np.mean(losses)
         return epoch_loss
 
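
train_epoch walks the training data in mini-batches via batch_iterator and averages the per-batch losses. A sketch of what such an iterator might look like (the real helper lives elsewhere in mla and may differ):

import numpy as np

def batch_iterator(X, batch_size=64):
    """Yield consecutive mini-batches of X."""
    n_samples = X.shape[0]
    for start in range(0, n_samples, batch_size):
        yield X[start:start + batch_size]

X = np.arange(10).reshape(10, 1)
for batch in batch_iterator(X, batch_size=4):
    print(batch.ravel())  # [0 1 2 3], then [4 5 6 7], then [8 9]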

mla/neuralnet/parameters.py

+4 -2
@@ -16,9 +16,10 @@ def __init__(self, init='glorot_uniform', scale=0.5, bias=1.0, regularizers=None
             Initial values for bias.
         regularizers : dict
             Weight regularizers.
-            {'W' : L2()}
+            >>> {'W' : L2()}
         constraints : dict
-            Weight constraints. {'b' : MaxNorm()}
+            Weight constraints.
+            >>> {'b' : MaxNorm()}
         """
         if constraints is None:
             self.constraints = {}
@@ -60,6 +61,7 @@ def step(self, name, step):
             self._params[name] = self.constraints[name].clip(self._params[name])
 
     def update_grad(self, name, value):
+        """Update gradient values."""
         self._grads[name] = value
 
         if name in self.regularizers:

mla/neuralnet/regularizers.py

+1
@@ -29,5 +29,6 @@ def _penalty(self, weights):
 
 
 class ElasticNet(Regularizer):
+    """Linear combination of L1 and L2 penalties."""
     def _penalty(self, weights):
         return 0.5 * self.C * weights ** 2 + (1.0 - self.C) * np.abs(weights)
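
A quick numeric check of the ElasticNet penalty above, 0.5 * C * w**2 + (1 - C) * |w|, with an assumed mixing coefficient C = 0.01:

import numpy as np

C = 0.01
weights = np.array([-1.0, 0.5])
penalty = 0.5 * C * weights ** 2 + (1.0 - C) * np.abs(weights)
print(penalty)  # [0.995   0.49625]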
