# -*- coding: utf-8 -*-
"""
===============================================================
Translation Invariant Sinkhorn for Unbalanced Optimal Transport
===============================================================

This example illustrates the faster convergence of the translation
invariant Sinkhorn algorithm proposed in [73] compared to the classical
Sinkhorn algorithm.

[73] Séjourné, T., Vialard, F.-X., & Peyré, G. (2022).
Faster Unbalanced Optimal Transport: Translation Invariant Sinkhorn and 1-D Frank-Wolfe.
In International Conference on Artificial Intelligence and Statistics (pp. 4995-5021). PMLR.

"""

# Author: Clément Bonet <[email protected]>
# License: MIT License

import numpy as np
import matplotlib.pylab as pl
import ot

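##############################################################################
# Background
# ----------
#
# As a rough sketch (following the formulation of [73] and the
# ``reg_type="kl"`` convention used below), the entropic KL-regularized UOT
# problem reads
#
# .. math::
#     \min_{\gamma \geq 0} \quad \langle \gamma, \mathbf{M} \rangle_F
#     + \mathrm{reg} \cdot \mathrm{KL}(\gamma, \mathbf{a} \mathbf{b}^T)
#     + \mathrm{reg_m} \cdot \mathrm{KL}(\gamma \mathbf{1}, \mathbf{a})
#     + \mathrm{reg_m} \cdot \mathrm{KL}(\gamma^T \mathbf{1}, \mathbf{b})
#
# Unlike in the balanced case, the dual of this problem is not invariant to
# joint translations :math:`(f, g) \mapsto (f + \lambda, g - \lambda)` of the
# potentials. Roughly speaking, the translation invariant Sinkhorn of [73]
# works on an equivalent, translation invariant reformulation of the dual
# (optimizing the translation at each iteration), which removes a slowly
# converging direction of the classical iterates.
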
##############################################################################
# Setting parameters
# ------------------

# %% parameters

n_iter = 50  # number of random trials (seeds)
n = 40  # number of samples per Gaussian cloud

num_iter_max = 100  # number of Sinkhorn iterations
n_noise = 10  # number of outlier points added to each cloud

reg = 0.005  # entropic regularization parameter
reg_m_kl = 0.05  # KL marginal relaxation parameter

mu_s = np.array([-1, -1])
cov_s = np.array([[1, 0], [0, 1]])

mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])

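
##############################################################################
# Visualize one draw of the data (optional)
# -----------------------------------------
#
# A quick scatter plot of a single realization of the two Gaussian clouds and
# their outliers, just to make the setting concrete (the seed and variable
# names here are only illustrative and are not reused afterwards).

np.random.seed(0)
xs_viz = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
xt_viz = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
xs_viz = np.concatenate((xs_viz, np.random.rand(n_noise, 2) - 4), axis=0)
xt_viz = np.concatenate((xt_viz, np.random.rand(n_noise, 2) + 6), axis=0)

pl.figure(figsize=(5, 5))
pl.scatter(xs_viz[:, 0], xs_viz[:, 1], marker='+', label='Source samples')
pl.scatter(xt_viz[:, 0], xt_viz[:, 1], marker='x', label='Target samples')
pl.legend()
pl.title('Source and target distributions (one draw)')
pl.show()
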
##############################################################################
# Compute entropic KL-regularized UOT with Sinkhorn and Translation Invariant Sinkhorn
# --------------------------------------------------------------------------------------

err_sinkhorn_uot = np.empty((n_iter, num_iter_max))
err_sinkhorn_uot_ti = np.empty((n_iter, num_iter_max))


for seed in range(n_iter):
    np.random.seed(seed)
    xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
    xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)

    # add uniform outliers far away from each cloud
    xs = np.concatenate((xs, np.random.rand(n_noise, 2) - 4), axis=0)
    xt = np.concatenate((xt, np.random.rand(n_noise, 2) + 6), axis=0)

    # total number of points (do not overwrite n, which is reused at each trial)
    n_tot = n + n_noise

    a, b = np.ones((n_tot,)) / n_tot, np.ones((n_tot,)) / n_tot  # uniform distribution on samples

    # loss matrix
    M = ot.dist(xs, xt)
    M /= M.max()

    entropic_kl_uot, log_uot = ot.unbalanced.sinkhorn_unbalanced(
        a, b, M, reg, reg_m_kl, reg_type="kl",
        log=True, numItermax=num_iter_max, stopThr=0)
    entropic_kl_uot_ti, log_uot_ti = ot.unbalanced.sinkhorn_unbalanced(
        a, b, M, reg, reg_m_kl, reg_type="kl",
        method="sinkhorn_translation_invariant",
        log=True, numItermax=num_iter_max, stopThr=0)

    err_sinkhorn_uot[seed] = log_uot["err"]
    err_sinkhorn_uot_ti[seed] = log_uot_ti["err"]

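##############################################################################
# Sanity check on the last trial (optional)
# -----------------------------------------
#
# Assuming each solver returns the transport plan as an array (the behaviour
# of ``ot.unbalanced.sinkhorn_unbalanced`` for 1-D histograms ``a`` and ``b``),
# both plans solve the same problem and should carry a similar total
# transported mass.

print("Total mass, classical Sinkhorn:            ", entropic_kl_uot.sum())
print("Total mass, translation invariant Sinkhorn:", entropic_kl_uot_ti.sum())
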
##############################################################################
# Plot the results
# ----------------

mean_sinkh = np.mean(err_sinkhorn_uot, axis=0)
std_sinkh = np.std(err_sinkhorn_uot, axis=0)

mean_sinkh_ti = np.mean(err_sinkhorn_uot_ti, axis=0)
std_sinkh_ti = np.std(err_sinkhorn_uot_ti, axis=0)

absc = list(range(num_iter_max))

pl.plot(absc, mean_sinkh, label="Sinkhorn")
pl.fill_between(absc, mean_sinkh - 2 * std_sinkh, mean_sinkh + 2 * std_sinkh, alpha=0.5)

pl.plot(absc, mean_sinkh_ti, label="Translation Invariant Sinkhorn")
pl.fill_between(absc, mean_sinkh_ti - 2 * std_sinkh_ti, mean_sinkh_ti + 2 * std_sinkh_ti, alpha=0.5)

pl.yscale("log")
pl.legend()
pl.xlabel("Number of Iterations")
pl.ylabel(r"$\|u-v\|_\infty$")
pl.grid(True)
pl.show()