-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathsparseFiltering.py
73 lines (57 loc) · 1.54 KB
/
sparseFiltering.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
"""
==================
Sparse filtering
==================
Python port of the sparse filtering matlab code by Jiquan Ngiam.
Requires numpy and scipy installed.
"""
import numpy as np
from scipy.optimize import minimize
def l2row(X):
    """
    Normalize the rows of X to (approximately) unit L2 norm.

    Returns the row-normalized matrix and the per-row norms. To normalize
    by columns instead, call l2row(X.T). A small epsilon (1e-8) under the
    square root keeps the norms strictly positive for all-zero rows.
    """
    norms = np.sqrt(np.sum(X * X, axis=1) + 1e-8)
    normalized = X / norms[:, np.newaxis]
    return normalized, norms
def l2rowg(X, Y, N, D):
    """
    Backpropagate a gradient through l2row's row normalization.

    Parameters
    ----------
    X : Raw (possibly centered) data.
    Y : Row-normalized data (first output of l2row(X)).
    N : Per-row norms (second output of l2row(X)).
    D : Deltas from the downstream layer.

    Returns
    -------
    Gradient of the downstream objective with respect to X.
    """
    # Per-row inner product <D, X>, the correction term of the quotient rule.
    row_dot = np.sum(D * X, axis=1)
    return D / N[:, np.newaxis] - Y * (row_dot / N**2)[:, np.newaxis]
def sparseFiltering(N, X):
    """
    Learn a sparse-filtering feature matrix from data.

    Parameters
    ----------
    N : int
        Number of features to learn (rows of the weight matrix).
    X : ndarray, shape (n_inputs, n_examples)
        Input data with one example per column.

    Returns
    -------
    ndarray, shape (N, n_inputs)
        The optimized weight matrix; apply it with feedForwardSF.
    """
    optW = np.random.randn(N, X.shape[0])

    def objFun(W):
        """Objective value and flattened gradient for L-BFGS-B (W arrives flat)."""
        # Feed forward
        W = W.reshape((N, X.shape[0]))
        F = W.dot(X)
        Fs = np.sqrt(F**2 + 1e-8)      # smooth absolute-value nonlinearity
        NFs, L2Fs = l2row(Fs)          # normalize rows (across examples)
        Fhat, L2Fn = l2row(NFs.T)      # normalize columns (per example)
        # Objective is the L1 sparsity penalty sum(Fhat); backprop through
        # each feed-forward step in reverse to obtain the gradient.
        DeltaW = l2rowg(NFs.T, Fhat, L2Fn, np.ones(Fhat.shape))
        DeltaW = l2rowg(Fs, NFs, L2Fs, DeltaW.T)
        DeltaW = (DeltaW * (F / Fs)).dot(X.T)
        return Fhat.sum(), DeltaW.flatten()

    # BUG FIX: removed the dead `w, g = objFun(optW)` call that performed a
    # full objective/gradient evaluation and discarded the result.
    res = minimize(objFun, optW, method='L-BFGS-B', jac=True,
                   options={'maxiter': 200})
    return res.x.reshape(N, X.shape[0])
def feedForwardSF(W, X):
    """Apply learned sparse-filtering weights W to data X (examples in columns)."""
    # Smooth absolute value of the linear responses.
    activations = np.sqrt(W.dot(X) ** 2 + 1e-8)
    # Normalize across examples (rows), then per example (columns).
    row_normed = l2row(activations)[0]
    col_normed = l2row(row_normed.T)[0]
    return col_normed.T