@@ -73,8 +73,8 @@ def phi(alpha1):
73
73
return alpha , fc [0 ], phi1
74
74
75
75
76
- def do_linesearch (cost , G , deltaG , Mi , f_val ,
77
- armijo = True , C1 = None , C2 = None , reg = None , Gc = None , constC = None , M = None ):
76
+ def solve_linesearch (cost , G , deltaG , Mi , f_val ,
77
+ armijo = True , C1 = None , C2 = None , reg = None , Gc = None , constC = None , M = None ):
78
78
"""
79
79
Solve the linesearch in the FW iterations
80
80
Parameters
@@ -93,17 +93,17 @@ def do_linesearch(cost, G, deltaG, Mi, f_val,
93
93
If True, the step of the line search is found via an Armijo line search. Otherwise the closed form is used.
94
94
If there are convergence issues, use False.
95
95
C1 : ndarray (ns,ns), optional
96
- Structure matrix in the source domain. Only used when armijo=False
96
+ Structure matrix in the source domain. Only used and necessary when armijo=False
97
97
C2 : ndarray (nt,nt), optional
98
- Structure matrix in the target domain. Only used when armijo=False
98
+ Structure matrix in the target domain. Only used and necessary when armijo=False
99
99
reg : float, optional
100
- Regularization parameter. Only used when armijo=False
100
+ Regularization parameter. Only used and necessary when armijo=False
101
101
Gc : ndarray (ns,nt)
102
- Optimal map found by linearization in the FW algorithm. Only used when armijo=False
102
+ Optimal map found by linearization in the FW algorithm. Only used and necessary when armijo=False
103
103
constC : ndarray (ns,nt)
104
- Constant for the gromov cost. See [24]. Only used when armijo=False
104
+ Constant for the gromov cost. See [24]. Only used and necessary when armijo=False
105
105
M : ndarray (ns,nt), optional
106
- Cost matrix between the features. Only used when armijo=False
106
+ Cost matrix between the features. Only used and necessary when armijo=False
107
107
Returns
108
108
-------
109
109
alpha : float
@@ -128,7 +128,7 @@ def do_linesearch(cost, G, deltaG, Mi, f_val,
128
128
b = np .sum ((M + reg * constC ) * deltaG ) - 2 * reg * (np .sum (dot12 * G ) + np .sum (np .dot (C1 , G ).dot (C2 ) * deltaG ))
129
129
c = cost (G )
130
130
131
- alpha = solve_1d_linesearch_quad_funct (a , b , c )
131
+ alpha = solve_1d_linesearch_quad (a , b , c )
132
132
fc = None
133
133
f_val = cost (G + alpha * deltaG )
134
134
@@ -181,7 +181,7 @@ def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
181
181
Print information along iterations
182
182
log : bool, optional
183
183
record log if True
184
- kwargs : dict
184
+ ** kwargs : dict
185
185
Parameters for linesearch
186
186
187
187
Returns
@@ -244,7 +244,7 @@ def cost(G):
244
244
deltaG = Gc - G
245
245
246
246
# line search
247
- alpha , fc , f_val = do_linesearch (cost , G , deltaG , Mi , f_val , reg = reg , M = M , Gc = Gc , ** kwargs )
247
+ alpha , fc , f_val = solve_linesearch (cost , G , deltaG , Mi , f_val , reg = reg , M = M , Gc = Gc , ** kwargs )
248
248
249
249
G = G + alpha * deltaG
250
250
@@ -254,7 +254,7 @@ def cost(G):
254
254
255
255
abs_delta_fval = abs (f_val - old_fval )
256
256
relative_delta_fval = abs_delta_fval / abs (f_val )
257
- if relative_delta_fval < stopThr and abs_delta_fval < stopThr2 :
257
+ if relative_delta_fval < stopThr or abs_delta_fval < stopThr2 :
258
258
loop = 0
259
259
260
260
if log :
@@ -395,7 +395,7 @@ def cost(G):
395
395
abs_delta_fval = abs (f_val - old_fval )
396
396
relative_delta_fval = abs_delta_fval / abs (f_val )
397
397
398
- if relative_delta_fval < stopThr and abs_delta_fval < stopThr2 :
398
+ if relative_delta_fval < stopThr or abs_delta_fval < stopThr2 :
399
399
loop = 0
400
400
401
401
if log :
@@ -413,11 +413,11 @@ def cost(G):
413
413
return G
414
414
415
415
416
- def solve_1d_linesearch_quad_funct (a , b , c ):
416
+ def solve_1d_linesearch_quad (a , b , c ):
417
417
"""
418
- Solve on 0,1 the following problem:
418
+ For any convex or non-convex 1d quadratic function f, solve on [0, 1] the following problem:
419
419
.. math::
420
- \min f(x)=a*x^{2}+b*x+c
420
+ \mathrm{argmin}\, f(x)=a*x^{2}+b*x+c
421
421
422
422
Parameters
423
423
----------
0 commit comments