@@ -38,22 +38,20 @@ def coordinate_grad_semi_dual(b, M, reg, beta, i):
38
38
39
39
Parameters
40
40
----------
41
-
42
- b : np.ndarray(nt,)
43
- target measure
44
- M : np.ndarray(ns, nt)
45
- cost matrix
46
- reg : float nu
47
- Regularization term > 0
48
- v : np.ndarray(nt,)
49
- dual variable
50
- i : number int
51
- picked number i
41
+ b : ndarray, shape (nt,)
42
+ Target measure.
43
+ M : ndarray, shape (ns, nt)
44
+ Cost matrix.
45
+ reg : float
46
+ Regularization term > 0.
47
+ v : ndarray, shape (nt,)
48
+ Dual variable.
49
+ i : int
50
+ Picked number i.
52
51
53
52
Returns
54
53
-------
55
-
56
- coordinate gradient : np.ndarray(nt,)
54
+ coordinate gradient : ndarray, shape (nt,)
57
55
58
56
Examples
59
57
--------
@@ -78,14 +76,11 @@ def coordinate_grad_semi_dual(b, M, reg, beta, i):
78
76
79
77
References
80
78
----------
81
-
82
79
[Genevay et al., 2016] :
83
- Stochastic Optimization for Large-scale Optimal Transport,
84
- Advances in Neural Information Processing Systems (2016),
85
- arXiv preprint arxiv:1605.08527.
86
-
80
+ Stochastic Optimization for Large-scale Optimal Transport,
81
+ Advances in Neural Information Processing Systems (2016),
82
+ arXiv preprint arxiv:1605.08527.
87
83
'''
88
-
89
84
r = M [i , :] - beta
90
85
exp_beta = np .exp (- r / reg ) * b
91
86
khi = exp_beta / (np .sum (exp_beta ))
@@ -121,24 +116,23 @@ def sag_entropic_transport(a, b, M, reg, numItermax=10000, lr=None):
121
116
Parameters
122
117
----------
123
118
124
- a : np. ndarray(ns,),
125
- source measure
126
- b : np. ndarray(nt,),
127
- target measure
128
- M : np. ndarray(ns, nt),
129
- cost matrix
130
- reg : float number,
119
+ a : ndarray, shape (ns,),
120
+ Source measure.
121
+ b : ndarray, shape (nt,),
122
+ Target measure.
123
+ M : ndarray, shape (ns, nt),
124
+ Cost matrix.
125
+ reg : float
131
126
Regularization term > 0
132
- numItermax : int number
133
- number of iteration
134
- lr : float number
135
- learning rate
127
+ numItermax : int
128
+ Number of iterations.
129
+ lr : float
130
+ Learning rate.
136
131
137
132
Returns
138
133
-------
139
-
140
- v : np.ndarray(nt,)
141
- dual variable
134
+ v : ndarray, shape (nt,)
135
+ Dual variable.
142
136
143
137
Examples
144
138
--------
@@ -213,23 +207,20 @@ def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None):
213
207
214
208
Parameters
215
209
----------
216
-
217
- b : np.ndarray(nt,)
210
+ b : ndarray, shape (nt,)
218
211
target measure
219
- M : np. ndarray(ns, nt)
212
+ M : ndarray, shape (ns, nt)
220
213
cost matrix
221
- reg : float number
214
+ reg : float
222
215
Regularization term > 0
223
- numItermax : int number
224
- number of iteration
225
- lr : float number
226
- learning rate
227
-
216
+ numItermax : int
217
+ Number of iterations.
218
+ lr : float
219
+ Learning rate.
228
220
229
221
Returns
230
222
-------
231
-
232
- ave_v : np.ndarray(nt,)
223
+ ave_v : ndarray, shape (nt,)
233
224
dual variable
234
225
235
226
Examples
@@ -256,9 +247,9 @@ def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None):
256
247
----------
257
248
258
249
[Genevay et al., 2016] :
259
- Stochastic Optimization for Large-scale Optimal Transport,
260
- Advances in Neural Information Processing Systems (2016),
261
- arXiv preprint arxiv:1605.08527.
250
+ Stochastic Optimization for Large-scale Optimal Transport,
251
+ Advances in Neural Information Processing Systems (2016),
252
+ arXiv preprint arxiv:1605.08527.
262
253
'''
263
254
264
255
if lr is None :
@@ -298,21 +289,19 @@ def c_transform_entropic(b, M, reg, beta):
298
289
299
290
Parameters
300
291
----------
301
-
302
- b : np.ndarray(nt,)
303
- target measure
304
- M : np.ndarray(ns, nt)
305
- cost matrix
292
+ b : ndarray, shape (nt,)
293
+ Target measure
294
+ M : ndarray, shape (ns, nt)
295
+ Cost matrix
306
296
reg : float
307
- regularization term > 0
308
- v : np. ndarray(nt,)
309
- dual variable
297
+ Regularization term > 0
298
+ v : ndarray, shape (nt,)
299
+ Dual variable.
310
300
311
301
Returns
312
302
-------
313
-
314
- u : np.ndarray(ns,)
315
- dual variable
303
+ u : ndarray, shape (ns,)
304
+ Dual variable.
316
305
317
306
Examples
318
307
--------
@@ -338,9 +327,9 @@ def c_transform_entropic(b, M, reg, beta):
338
327
----------
339
328
340
329
[Genevay et al., 2016] :
341
- Stochastic Optimization for Large-scale Optimal Transport,
342
- Advances in Neural Information Processing Systems (2016),
343
- arXiv preprint arxiv:1605.08527.
330
+ Stochastic Optimization for Large-scale Optimal Transport,
331
+ Advances in Neural Information Processing Systems (2016),
332
+ arXiv preprint arxiv:1605.08527.
344
333
'''
345
334
346
335
n_source = np .shape (M )[0 ]
@@ -382,31 +371,30 @@ def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None,
382
371
Parameters
383
372
----------
384
373
385
- a : np. ndarray(ns,)
374
+ a : ndarray, shape (ns,)
386
375
source measure
387
- b : np. ndarray(nt,)
376
+ b : ndarray, shape (nt,)
388
377
target measure
389
- M : np. ndarray(ns, nt)
378
+ M : ndarray, shape (ns, nt)
390
379
cost matrix
391
- reg : float number
380
+ reg : float
392
381
Regularization term > 0
393
382
methode : str
394
383
used method (SAG or ASGD)
395
- numItermax : int number
384
+ numItermax : int
396
385
number of iteration
397
- lr : float number
386
+ lr : float
398
387
learning rate
399
- n_source : int number
388
+ n_source : int
400
389
size of the source measure
401
- n_target : int number
390
+ n_target : int
402
391
size of the target measure
403
392
log : bool, optional
404
393
record log if True
405
394
406
395
Returns
407
396
-------
408
-
409
- pi : np.ndarray(ns, nt)
397
+ pi : ndarray, shape (ns, nt)
410
398
transportation matrix
411
399
log : dict
412
400
log dictionary return only if log==True in parameters
@@ -495,30 +483,28 @@ def batch_grad_dual(a, b, M, reg, alpha, beta, batch_size, batch_alpha,
495
483
496
484
Parameters
497
485
----------
498
-
499
- a : np.ndarray(ns,)
486
+ a : ndarray, shape (ns,)
500
487
source measure
501
- b : np. ndarray(nt,)
488
+ b : ndarray, shape (nt,)
502
489
target measure
503
- M : np. ndarray(ns, nt)
490
+ M : ndarray, shape (ns, nt)
504
491
cost matrix
505
- reg : float number
492
+ reg : float
506
493
Regularization term > 0
507
- alpha : np. ndarray(ns,)
494
+ alpha : ndarray, shape (ns,)
508
495
dual variable
509
- beta : np. ndarray(nt,)
496
+ beta : ndarray, shape (nt,)
510
497
dual variable
511
- batch_size : int number
498
+ batch_size : int
512
499
size of the batch
513
- batch_alpha : np. ndarray(bs,)
500
+ batch_alpha : ndarray, shape (bs,)
514
501
batch of index of alpha
515
- batch_beta : np. ndarray(bs,)
502
+ batch_beta : ndarray, shape (bs,)
516
503
batch of index of beta
517
504
518
505
Returns
519
506
-------
520
-
521
- grad : np.ndarray(ns,)
507
+ grad : ndarray, shape (ns,)
522
508
partial grad F
523
509
524
510
Examples
@@ -591,28 +577,26 @@ def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr):
591
577
592
578
Parameters
593
579
----------
594
-
595
- a : np.ndarray(ns,)
580
+ a : ndarray, shape (ns,)
596
581
source measure
597
- b : np. ndarray(nt,)
582
+ b : ndarray, shape (nt,)
598
583
target measure
599
- M : np. ndarray(ns, nt)
584
+ M : ndarray, shape (ns, nt)
600
585
cost matrix
601
- reg : float number
586
+ reg : float
602
587
Regularization term > 0
603
- batch_size : int number
588
+ batch_size : int
604
589
size of the batch
605
- numItermax : int number
590
+ numItermax : int
606
591
number of iteration
607
- lr : float number
592
+ lr : float
608
593
learning rate
609
594
610
595
Returns
611
596
-------
612
-
613
- alpha : np.ndarray(ns,)
597
+ alpha : ndarray, shape (ns,)
614
598
dual variable
615
- beta : np. ndarray(nt,)
599
+ beta : ndarray, shape (nt,)
616
600
dual variable
617
601
618
602
Examples
@@ -648,10 +632,9 @@ def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr):
648
632
649
633
References
650
634
----------
651
-
652
635
[Seguy et al., 2018] :
653
- International Conference on Learning Representation (2018),
654
- arXiv preprint arxiv:1711.02283.
636
+ International Conference on Learning Representation (2018),
637
+ arXiv preprint arxiv:1711.02283.
655
638
'''
656
639
657
640
n_source = np .shape (M )[0 ]
@@ -696,28 +679,26 @@ def solve_dual_entropic(a, b, M, reg, batch_size, numItermax=10000, lr=1,
696
679
697
680
Parameters
698
681
----------
699
-
700
- a : np.ndarray(ns,)
682
+ a : ndarray, shape (ns,)
701
683
source measure
702
- b : np. ndarray(nt,)
684
+ b : ndarray, shape (nt,)
703
685
target measure
704
- M : np. ndarray(ns, nt)
686
+ M : ndarray, shape (ns, nt)
705
687
cost matrix
706
- reg : float number
688
+ reg : float
707
689
Regularization term > 0
708
- batch_size : int number
690
+ batch_size : int
709
691
size of the batch
710
- numItermax : int number
692
+ numItermax : int
711
693
number of iteration
712
- lr : float number
694
+ lr : float
713
695
learning rate
714
696
log : bool, optional
715
697
record log if True
716
698
717
699
Returns
718
700
-------
719
-
720
- pi : np.ndarray(ns, nt)
701
+ pi : ndarray, shape (ns, nt)
721
702
transportation matrix
722
703
log : dict
723
704
log dictionary return only if log==True in parameters
@@ -757,8 +738,8 @@ def solve_dual_entropic(a, b, M, reg, batch_size, numItermax=10000, lr=1,
757
738
----------
758
739
759
740
[Seguy et al., 2018] :
760
- International Conference on Learning Representation (2018),
761
- arXiv preprint arxiv:1711.02283.
741
+ International Conference on Learning Representation (2018),
742
+ arXiv preprint arxiv:1711.02283.
762
743
'''
763
744
764
745
opt_alpha , opt_beta = sgd_entropic_regularization (a , b , M , reg , batch_size ,
0 commit comments