You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
@inproceedings{eijkelboom2025controlled,
  title     = {Controlled Generation with Equivariant Variational Flow Matching},
  author    = {Eijkelboom, Floor and Zimmermann, Heiko and Bekkers, Erik and Welling, Max and Naesseth, Christian and {van de Meent}, Jan-Willem},
  booktitle = {International Conference on Machine Learning},
  year      = {2025},
  abbr      = {ICML}
}
8
+
9
+
@inproceedings{guzmancordero2025exponential,
  title     = {Exponential Family Variational Flow Matching for Tabular Data Generation},
  author    = {{Guzm{\'a}n-Cordero}*, Andr{\'e}s and Eijkelboom*, Floor and {van de Meent}, Jan-Willem},
  booktitle = {International Conference on Machine Learning},
  year      = {2025},
  abbr      = {ICML}
}
16
+
17
+
@inproceedings{zhdanov2025erwin,
  title        = {Erwin: {{A Tree-based Hierarchical Transformer}} for {{Large-scale Physical Systems}}},
  shorttitle   = {Erwin},
  author       = {Zhdanov, Maksim and Welling, Max and {van de Meent}, Jan-Willem},
  booktitle    = {International Conference on Machine Learning},
  year         = {2025},
  number       = {arXiv:2502.17019},
  eprint       = {2502.17019},
  primaryclass = {cs},
  publisher    = {arXiv},
  doi          = {10.48550/arXiv.2502.17019},
  urldate      = {2025-05-06},
  abstract     = {Large-scale physical systems defined on irregular grids pose significant scalability challenges for deep learning methods, especially in the presence of long-range interactions and multi-scale coupling. Traditional approaches that compute all pairwise interactions, such as attention, become computationally prohibitive as they scale quadratically with the number of nodes. We present Erwin, a hierarchical transformer inspired by methods from computational many-body physics, which combines the efficiency of tree-based algorithms with the expressivity of attention mechanisms. Erwin employs ball tree partitioning to organize computation, which enables linear-time attention by processing nodes in parallel within local neighborhoods of fixed size. Through progressive coarsening and refinement of the ball tree structure, complemented by a novel cross-ball interaction mechanism, it captures both fine-grained local details and global features. We demonstrate Erwin's effectiveness across multiple domains, including cosmology, molecular dynamics, and particle fluid dynamics, where it consistently outperforms baseline methods both in accuracy and computational efficiency.}
}
@article{dijkman2025learning,
  title     = {Learning {{Neural Free-Energy Functionals}} with {{Pair-Correlation Matching}}},
  author    = {Dijkman, Jacobus and Dijkstra, Marjolein and {van Roij}, Ren{\'e} and Welling, Max and {van de Meent}, Jan-Willem and Ensing, Bernd},
  year      = {2025},
  month     = feb,
  journal   = {Physical Review Letters},
  volume    = {134},
  number    = {5},
  pages     = {056103},
  publisher = {American Physical Society},
  doi       = {10.1103/PhysRevLett.134.056103},
  urldate   = {2025-05-06},
  abstract  = {The intrinsic Helmholtz free-energy functional, the centerpiece of classical density functional theory, is at best only known approximately for 3D systems. Here we introduce a method for learning a neural-network approximation of this functional by exclusively training on a dataset of radial distribution functions, circumventing the need to sample costly heterogeneous density profiles in a wide variety of external potentials. For a supercritical Lennard-Jones system with planar symmetry, we demonstrate that the learned neural free-energy functional accurately predicts inhomogeneous density profiles under various complex external potentials obtained from simulations.},
  abbr      = {PRL},
  html      = {https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.134.056103},
  pdf       = {https://arxiv.org/pdf/2403.15007}
}
54
+
55
+
@inproceedings{biza25onrobot,
  title     = {On-Robot Reinforcement Learning with Goal-Contrastive Rewards},
  author    = {Biza, Ondrej and Weng, Thomas and Sun, Lingfeng and Schmeckpeper, Karl and Kelestemur, Tarik and Ma, Yecheng Jason and Platt, Robert and {van de Meent}, Jan-Willem and Wong, Lawson L. S.},
  booktitle = {Proceedings of the 2025 IEEE International Conference on Robotics and Automation, ICRA'25},
  year      = {2025},
  abbr      = {ICRA},
  html      = {https://arxiv.org/abs/2410.19989},
  pdf       = {https://arxiv.org/pdf/2410.19989}
}
72
+
73
+
@inproceedings{kunze2024practical,
  title     = {Practical Shuffle Coding},
  booktitle = {Advances in Neural Information Processing Systems},
  author    = {Kunze, Julius and Severo, Daniel and {van de Meent}, Jan-Willem and Townsend, James},
  editor    = {Globerson, A. and Mackey, L. and Belgrave, D. and Fan, A. and Paquet, U. and Tomczak, J. and Zhang, C.},
  year      = {2024},
  volume    = {37},
  pages     = {84081--84113},
  abbr      = {NeurIPS},
  publisher = {Curran Associates, Inc.},
  html      = {https://proceedings.neurips.cc/paper_files/paper/2024/hash/98d17a9632e1534bae96793e99dc3c2d-Abstract-Conference.html},
  pdf       = {https://proceedings.neurips.cc/paper_files/paper/2024/file/98d17a9632e1534bae96793e99dc3c2d-Paper-Conference.pdf}
}
86
+
87
+
@inproceedings{eijkelboom2024variational,
  title     = {Variational Flow Matching for Graph Generation},
  booktitle = {Advances in Neural Information Processing Systems},
  author    = {Eijkelboom, Floor and Bartosh, Grigory and Naesseth, Christian A. and Welling, Max and {van de Meent}, Jan-Willem},
  editor    = {Globerson, A. and Mackey, L. and Belgrave, D. and Fan, A. and Paquet, U. and Tomczak, J. and Zhang, C.},
  year      = {2024},
  volume    = {37},
  pages     = {11735--11764},
  publisher = {Curran Associates, Inc.},
  abbr      = {NeurIPS},
  html      = {https://proceedings.neurips.cc/paper_files/paper/2024/hash/15b780350b302a1bf9a3bd273f5c15a4-Abstract-Conference.html},
  pdf       = {https://proceedings.neurips.cc/paper_files/paper/2024/file/15b780350b302a1bf9a3bd273f5c15a4-Paper-Conference.pdf}
}
100
+
101
+
@inproceedings{zimmermann2024visa,
  title     = {{{VISA}}: {{Variational}} Inference with Sequential Sample-Average Approximations},
  booktitle = {Advances in Neural Information Processing Systems},
  author    = {Zimmermann, Heiko and Naesseth, Christian A. and {van de Meent}, Jan-Willem},
  editor    = {Globerson, A. and Mackey, L. and Belgrave, D. and Fan, A. and Paquet, U. and Tomczak, J. and Zhang, C.},
  year      = {2024},
  volume    = {37},
  pages     = {138789--138808},
  publisher = {Curran Associates, Inc.},
  abbr      = {NeurIPS},
  html      = {https://proceedings.neurips.cc/paper_files/paper/2024/hash/fa948624dfde013671e72c1a7ca4aebc-Abstract-Conference.html},
  pdf       = {https://proceedings.neurips.cc/paper_files/paper/2024/file/fa948624dfde013671e72c1a7ca4aebc-Paper-Conference.pdf}
}
114
+
115
+
@inproceedings{mcinerney2024reducing,
  title     = {Towards {{Reducing Diagnostic Errors}} with {{Interpretable Risk Prediction}}},
  author    = {McInerney, Denis Jered and Dickinson, William and Flynn, Lucy C. and Young, Andrea C. and Young, Geoffrey S. and {van de Meent}, Jan-Willem and Wallace, Byron C.},
  booktitle = {2024 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)},
  year      = {2024},
  abstract  = {Many diagnostic errors occur because clinicians cannot easily access relevant information in patient Electronic Health Records (EHRs). In this work we propose a method to use LLMs to identify pieces of evidence in patient EHR data that indicate increased or decreased risk of specific diagnoses; our ultimate aim is to increase access to evidence and reduce diagnostic errors. In particular, we propose a Neural Additive Model to make predictions backed by evidence with individualized risk estimates at time-points where clinicians are still uncertain, aiming to specifically mitigate delays in diagnosis and errors stemming from an incomplete differential. To train such a model, it is necessary to infer temporally fine-grained retrospective labels of eventual "true" diagnoses. We do so with LLMs, to ensure that the input text is from before a confident diagnosis can be made. We use an LLM to retrieve an initial pool of evidence, but then refine this set of evidence according to correlations learned by the model. We conduct an in-depth evaluation of the usefulness of our approach by simulating how it might be used by a clinician to decide between a pre-defined list of differential diagnoses.},
  abbr      = {NAACL},
  html      = {https://aclanthology.org/2024.naacl-long.399/},
  pdf       = {https://aclanthology.org/2024.naacl-long.399.pdf}
}
125
+
126
+
1
127
@inproceedings{kunze2024entropy,
2
128
title={Entropy Coding of Unordered Data Structures},
3
129
author={Julius Kunze and Daniel Severo and Giulio Zani and Jan-Willem {van de Meent} and James Townsend},
4
130
booktitle={International Conference on Learning Representations (ICLR)},
abstract = {A growing body of research on probabilistic programs and causal models has highlighted the need to reason compositionally about model classes that extend directed graphical models. Both probabilistic programs and causal models define a joint probability density over a set of random variables, and exhibit sparse structure that can be used to reason about causation and conditional independence. This work builds on recent work on Markov categories of probabilistic mappings to define a category whose morphisms combine a joint density, factorized over each sample space, with a deterministic mapping from samples to return values. This is a step towards closing the gap between recent category-theoretic descriptions of probability measures, and the operational definitions of factorized densities that are commonly employed in probabilistic programming and causal inference.},
html = {https://www.sciencedirect.com/science/article/pii/S0301051121002350},
170
300
urldate = {2022-04-22},
171
301
abstract = {The brain regulates the body by anticipating its needs and attempting to meet them before they arise – a process called allostasis. Allostasis requires a model of the changing sensory conditions within the body, a process called interoception. In this paper, we examine how interoception may provide performance feedback for allostasis. We suggest studying allostasis in terms of control theory, reviewing control theory’s applications to related issues in physiology, motor control, and decision making. We synthesize these by relating them to the important properties of allostatic regulation as a control problem. We then sketch a novel formalism for how the brain might perform allostatic control of the viscera by analogy to skeletomotor control, including a mathematical view on how interoception acts as performance feedback for allostasis. Finally, we suggest ways to test implications of our hypotheses.},
Copy file name to clipboardExpand all lines: _people/JanWillemVanDeMeent.md
+7-5Lines changed: 7 additions & 5 deletions
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -8,16 +8,15 @@ office: Science Park, Lab 42, L4.13
8
8
lab: AMLab and Delta Lab
9
9
institute: Informatics Institute
10
10
university: University of Amsterdam
11
-
one_liner: Probabilistic programming, inference, deep learning, and their applications.
11
+
one_liner: AI for scalable and data-efficient scientific computation.
12
12
description: |
13
-
Dr. Jan-Willem van de Meent is an Associate Professor (Universitair Hoofddocent) at the University of Amsterdam. He co-directs the [AMLab](https://amlab.science.uva.nl/) with Max Welling and co-directs the [Uva Bosch Delta Lab](https://ivi.fnwi.uva.nl/uvaboschdeltalab/) with Theo Gevers. He previously held a position as an Assistant Professor at Northeastern University, where he continues to co-advise and collaborate. Prior to becoming faculty at Northeastern, he held a postdoctoral position with Frank Wood at Oxford, as well as a postdoctoral position with Chris Wiggins and Ruben Gonzalez at Columbia University. He carried out his PhD research in biophysics at Leiden and Cambridge with Wim van Saarloos and Ray Goldstein.
13
+
Dr. Jan-Willem van de Meent is an Associate Professor (Universitair Hoofddocent) at the University of Amsterdam. He directs the [AMLab](https://amlab.science.uva.nl/), co-directs the [UvA Bosch Delta Lab](https://ivi.fnwi.uva.nl/uvaboschdeltalab/), and directs the [Amsterdam ELLIS Unit](https://ivi.fnwi.uva.nl/ellis/). He previously held a position as an Assistant Professor at Northeastern University, where he continues to co-advise and collaborate. Prior to becoming faculty at Northeastern, he held a postdoctoral position with Frank Wood at Oxford, as well as a postdoctoral position with Chris Wiggins and Ruben Gonzalez at Columbia University. He carried out his PhD research in biophysics at Leiden and Cambridge with Wim van Saarloos and Ray Goldstein. He served as a founding co-chair of the international conference on probabilistic programming ([PROBPROG](https://probprog.cc/)) and served as a program chair for the international conference on artificial intelligence and statistics ([AISTATS](https://aistats.org/aistats2023/)). He was the recipient of numerous grants, including an NWO Rubicon Fellowship and an NSF CAREER award.
14
+
15
+
Jan-Willem van de Meent’s research seeks to understand what methods in AI have the potential to generalize across diverse application domains, and how we can think compositionally about such methods. One aspect of his work focuses on methods development in generative AI, deep learning, and probabilistic programming. He also collaborates extensively in a range of application domains. In the past he has worked on problems in biophysics, neuroscience, healthcare, and robotics. His current collaborations focus on physical chemistry, fluid mechanics, and materials science. The two problems he currently cares about most are uses of AI to make scientific computation more scalable, and maximizing data-efficiency of AI methods in the context of scientific domains.
14
16
15
-
Jan-Willem van de Meent’s group develops models for artificial intelligence by combining probabilistic programming and deep learning. A major theme in this work is understanding how we can develop data-efficient models in machine learning by incorporating knowledge of an underlying physical system, causal structure, or symmetries of the underlying domain. At a technical level, his group develops inference methods for probabilistic programming systems. He is one of the creators of [Anglican](https://probprog.github.io/anglican/), a probabilistic language based on Clojure and of [Probabilistic Torch](https://github.com/probtorch/probtorch), a library for deep generative models that extends PyTorch. He is also an author on a forthcoming book on probabilistic programming, a draft of which is available on arXiv. To ground methodological work in practice, his group collaborates with researchers in neuroscience, NLP, healthcare, robotics, physics, and chemistry.
16
17
17
-
Jan-Willem van de Meent served as a founding co-chair of the international conference on probabilistic programming ([PROBPROG](https://probprog.cc/)). He is currently serving as a program chair for the international conference on artificial intelligence and statistics ([AISTATS](https://aistats.org/aistats2023/)). He was the recipient of an NWO Rubicon Fellowship and of an NSF CAREER award.
0 commit comments