@@ -35,7 +35,7 @@ def sinkhorn(a, b, M, reg, method='sinkhorn', numItermax=1000,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
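A minimal usage sketch for the function this hunk documents, matching the convention the reworded docstring describes (histograms summing to 1, cost matrix of shape `(dim_a, dim_b)`). The histograms, support points, and `reg` value are illustrative assumptions, not taken from the patch:

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])                   # source histogram, sums to 1
b = np.array([0.25, 0.75])                 # target histogram, sums to 1
xs = np.array([[0.0], [1.0]])              # source support points
xt = np.array([[0.0], [1.0]])              # target support points
M = ot.dist(xs, xt)                        # (dim_a, dim_b) squared-Euclidean cost

G = ot.bregman.sinkhorn(a, b, M, reg=0.1)  # entropic OT plan, shape (dim_a, dim_b)
print(G.sum(axis=1))                       # row marginals should be close to a
```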
@@ -143,7 +143,7 @@ def sinkhorn2(a, b, M, reg, method='sinkhorn', numItermax=1000,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
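`sinkhorn2` solves the same problem but returns the regularized OT loss rather than the transport plan. A sketch under the same illustrative data as above:

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])
b = np.array([0.25, 0.75])
M = ot.dist(np.array([[0.0], [1.0]]), np.array([[0.0], [1.0]]))

# Same problem as sinkhorn above, but the return value is the
# entropic-regularized OT loss instead of the transport matrix.
loss = ot.bregman.sinkhorn2(a, b, M, reg=0.1)
print(loss)
```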
@@ -251,7 +251,7 @@ def sinkhorn_knopp(a, b, M, reg, numItermax=1000,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
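`sinkhorn_knopp` is the plain scaling iteration itself; it can also be called directly. A sketch with `log=True`, which additionally returns a dict of convergence information (its exact keys are a POT implementation detail, assumed here only to exist):

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])
b = np.array([0.25, 0.75])
M = ot.dist(np.array([[0.0], [1.0]]), np.array([[0.0], [1.0]]))

# Direct call to the plain Sinkhorn-Knopp iteration; log=True also
# returns a dict with convergence information.
G, log = ot.bregman.sinkhorn_knopp(a, b, M, reg=0.1, log=True)
print(G)
print(sorted(log.keys()))
```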
@@ -432,7 +432,7 @@ def greenkhorn(a, b, M, reg, numItermax=10000, stopThr=1e-9, verbose=False,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
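`greenkhorn` exposes the same `(a, b, M, reg)` interface. It is a greedy variant that updates one row or column of the scaling per iteration, which is consistent with the larger `numItermax=10000` default in the signature above. Illustrative sketch:

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])
b = np.array([0.25, 0.75])
M = ot.dist(np.array([[0.0], [1.0]]), np.array([[0.0], [1.0]]))

# Greedy Sinkhorn variant: one coordinate of the scaling is updated per
# iteration, hence the larger iteration budget in its defaults.
G = ot.bregman.greenkhorn(a, b, M, reg=0.1)
print(G)
```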
@@ -578,7 +578,8 @@ def sinkhorn_stabilized(a, b, M, reg, numItermax=1000, tau=1e3, stopThr=1e-9,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
+
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [2]_ but with the log stabilization
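A sketch of when the stabilized variant matters: with a small `reg`, the kernel `exp(-M/reg)` under/overflows in the plain iteration, and scalings larger than `tau` are absorbed into log-domain potentials (the same absorption step visible in the `barycenter_stabilized` hunk below). The `reg` value is an illustrative assumption:

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])
b = np.array([0.25, 0.75])
M = ot.dist(np.array([[0.0], [1.0]]), np.array([[0.0], [1.0]]))

# Small reg makes exp(-M/reg) numerically degenerate for the plain
# iteration; the stabilized solver absorbs large scalings (threshold tau).
G = ot.bregman.sinkhorn_stabilized(a, b, M, reg=1e-3, tau=1e3)
print(G)
```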
@@ -808,7 +809,8 @@ def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4,
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - a and b are source and target weights (sum to 1)
+ - a and b are source and target weights (histograms, both sum to 1)
+
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [2]_ but with the log stabilization
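`sinkhorn_epsilon_scaling` layers epsilon scaling on top of the stabilized iteration: per its signature it starts from a large regularization `epsilon0` and works down toward the target `reg` across outer iterations. Illustrative sketch, same assumed data as above:

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])
b = np.array([0.25, 0.75])
M = ot.dist(np.array([[0.0], [1.0]]), np.array([[0.0], [1.0]]))

# Epsilon scaling: begin at a large regularization (epsilon0) and decrease
# it toward the target reg, warm-starting each stage from the previous one.
G = ot.bregman.sinkhorn_epsilon_scaling(a, b, M, reg=1e-3, epsilon0=1e4)
print(G)
```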
@@ -1229,7 +1231,6 @@ def barycenter_stabilized(A, M, reg, tau=1e10, weights=None, numItermax=1000,
absorbing = False
if (u > tau).any() or (v > tau).any():
    absorbing = True
-     print("YEAH absorbing")
    alpha = alpha + reg * np.log(np.max(u, 1))
    beta = beta + reg * np.log(np.max(v, 1))
    K = np.exp((alpha[:, None] + beta[None, :] -
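For the function this hunk touches (beyond dropping the debug print): a sketch of calling `barycenter_stabilized` on two 1-D histograms, assuming the POT convention that `A` stacks one histogram per column; `tau` is the absorption threshold used in the code above. Grid, histograms, and `reg` are illustrative:

```python
import numpy as np
import ot

n = 10
grid = np.arange(n, dtype=float).reshape(-1, 1)
a1 = np.exp(-0.5 * (grid.ravel() - 2.0) ** 2)
a1 /= a1.sum()                         # first histogram, sums to 1
a2 = np.exp(-0.5 * (grid.ravel() - 7.0) ** 2)
a2 /= a2.sum()                         # second histogram, sums to 1
A = np.stack([a1, a2], axis=1)         # (n, 2): one histogram per column (assumed)
M = ot.dist(grid, grid)
M /= M.max()                           # normalized cost, a common POT idiom

bary = ot.bregman.barycenter_stabilized(A, M, reg=1e-2)
print(bary.sum())                      # the barycenter is itself a histogram
```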
@@ -1394,26 +1395,29 @@ def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
where :
- :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with M loss matrix (see ot.bregman.sinkhorn)
- - :math:`\mathbf{a}` is an observed distribution, :math:`\mathbf{h}_0` is aprior on unmixing
- - reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT data fitting
- - reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix for regularization
+ - :math:`\mathbf{D}` is a dictionary of `n_atoms` atoms of dimension `dim_a`, its expected shape is `(dim_a, n_atoms)`
+ - :math:`\mathbf{h}` is the estimated unmixing of dimension `n_atoms`
+ - :math:`\mathbf{a}` is an observed distribution of dimension `dim_a`
+ - :math:`\mathbf{h}_0` is a prior on `h` of dimension `dim_prior`
+ - reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix (dim_a, dim_a) for OT data fitting
+ - reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix (dim_prior, n_atoms) for regularization
- - :math:`\\alpha`weight data fitting and regularization
+ - :math:`\\alpha` weights data fitting and regularization

- The optimization problem is solved suing the algorithm described in [4]
+ The optimization problem is solved using the algorithm described in [4]

Parameters
----------
- a : ndarray, shape (n_observed)
-     observed distribution
- D : ndarray, shape (dim, dim)
+ a : ndarray, shape (dim_a)
+     observed distribution (histogram, sums to 1)
+ D : ndarray, shape (dim_a, n_atoms)
    dictionary matrix
- M : ndarray, shape (dim, dim)
+ M : ndarray, shape (dim_a, dim_a)
    loss matrix
- M0 : ndarray, shape (n_observed, n_observed)
+ M0 : ndarray, shape (n_atoms, dim_prior)
    loss matrix
- h0 : ndarray, shape (dim,)
-     prior on h
+ h0 : ndarray, shape (n_atoms,)
+     prior on the estimated unmixing h
reg : float
    Regularization term >0 (Wasserstein data fitting)
reg0 : float
@@ -1432,7 +1436,7 @@ def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
Returns
-------
- a : ndarray, shape (dim,)
-     Wasserstein barycenter
+ h : ndarray, shape (n_atoms,)
+     estimated unmixing of the observed distribution
log : dict
- log dictionary return only if log==True in parameters
+ log dictionary returned only if log==True in parameters
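A sketch of `unmix` using the shapes the patched docstring specifies: `a` is `(dim_a,)`, `D` is `(dim_a, n_atoms)` with one atom per column, `M` is the `(dim_a, dim_a)` data-fitting cost, and `M0`/`h0` live on the atoms. All numeric values are illustrative assumptions:

```python
import numpy as np
import ot

dim_a, n_atoms = 4, 2
a = np.array([0.4, 0.3, 0.2, 0.1])                 # observed histogram (dim_a,)
D = np.array([[0.7, 0.0],
              [0.3, 0.1],
              [0.0, 0.4],
              [0.0, 0.5]])                         # dictionary, one atom per column
grid = np.arange(dim_a, dtype=float).reshape(-1, 1)
M = ot.dist(grid, grid)                            # (dim_a, dim_a) data-fitting cost
M0 = ot.dist(np.arange(n_atoms, dtype=float).reshape(-1, 1))  # cost between atoms
h0 = np.ones(n_atoms) / n_atoms                    # uniform prior on the unmixing

h = ot.bregman.unmix(a, D, M, M0, h0, reg=0.1, reg0=0.1, alpha=0.5)
print(h)                                           # estimated unmixing over the atoms
```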