Skip to content

Commit 1059248

Browse files
authored
Merge branch 'mir-group:main' into stratified_metrics
2 parents 2ae5d7f + d3a7763 commit 1059248

File tree

7 files changed

+30
-19
lines changed

7 files changed

+30
-19
lines changed

.github/workflows/tests.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ jobs:
3131
run: |
3232
python -m pip install --upgrade pip
3333
pip install setuptools wheel
34+
if [ ${TORCH} = "1.13.1" ]; then pip install numpy==1.*; fi # older torch versions fail with numpy 2
3435
pip install torch==${TORCH} -f https://download.pytorch.org/whl/cpu/torch_stable.html
3536
pip install h5py scikit-learn # install packages that aren't required dependencies but that the tests do need
3637
pip install --upgrade-strategy only-if-needed .

CHANGELOG.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77
Most recent change on the bottom.
88

99

10-
## Unreleased - 0.6.1
10+
## Unreleased
11+
12+
13+
## [0.6.1] - 2024-7-9
1114
### Added
1215
- add support for equivariance testing of arbitrary Cartesian tensor outputs
1316
- [Breaking] use entry points for `nequip.extension`s (e.g. for field registration)

nequip/__init__.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,16 +11,14 @@
1111
torch_version = packaging.version.parse(torch.__version__)
1212

1313
# only allow 1.11*, 1.13* or higher (no 1.12.*)
14-
assert (torch_version > packaging.version.parse("1.11.0")) and not (
15-
packaging.version.parse("1.12.0")
16-
<= torch_version
17-
< packaging.version.parse("1.13.0")
14+
assert (torch_version == packaging.version.parse("1.11")) or (
15+
torch_version >= packaging.version.parse("1.13")
1816
), f"NequIP supports PyTorch 1.11.* or 1.13.* or later, but {torch_version} found"
1917

2018
# warn if using 1.13* or 2.0.*
21-
if packaging.version.parse("1.13.0") <= torch_version < packaging.version.parse("2.1"):
19+
if packaging.version.parse("1.13.0") <= torch_version:
2220
warnings.warn(
23-
f"!! PyTorch version {torch_version} found. Upstream issues in PyTorch versions 1.13.* and 2.0.* have been seen to cause unusual performance degradations on some CUDA systems that become worse over time; see https://github.com/mir-group/nequip/discussions/311. The best tested PyTorch version to use with CUDA devices is 1.11; while using other versions if you observe this problem, an unexpected lack of this problem, or other strange behavior, please post in the linked GitHub issue."
21+
f"!! PyTorch version {torch_version} found. Upstream issues in PyTorch versions 1.13.* and 2.* have been seen to cause unusual performance degradations on some CUDA systems that become worse over time; see https://github.com/mir-group/nequip/discussions/311. The best tested PyTorch version to use with CUDA devices is 1.11; while using other versions if you observe this problem, an unexpected lack of this problem, or other strange behavior, please post in the linked GitHub issue."
2422
)
2523

2624

nequip/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,4 @@
22
# See Python packaging guide
33
# https://packaging.python.org/guides/single-sourcing-package-version/
44

5-
__version__ = "0.6.0"
5+
__version__ = "0.6.1"

nequip/utils/test.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import torch
44
from e3nn import o3
5-
from e3nn.util.test import equivariance_error, FLOAT_TOLERANCE
5+
from e3nn.util.test import equivariance_error
66

77
from nequip.nn import GraphModuleMixin, GraphModel
88
from nequip.data import (
@@ -12,7 +12,17 @@
1212
_EDGE_FIELDS,
1313
_CARTESIAN_TENSOR_FIELDS,
1414
)
15-
15+
from nequip.utils.misc import dtype_from_name
16+
17+
# The default float tolerance
18+
FLOAT_TOLERANCE = {
19+
t: torch.as_tensor(v, dtype=dtype_from_name(t))
20+
for t, v in {"float32": 1e-3, "float64": 1e-10}.items()
21+
}
22+
# Allow lookup by name or dtype object:
23+
for t, v in list(FLOAT_TOLERANCE.items()):
24+
FLOAT_TOLERANCE[dtype_from_name(t)] = v
25+
del t, v
1626

1727
# This has to be somewhat large because of float32 sum reductions over many edges/atoms
1828
PERMUTATION_FLOAT_TOLERANCE = {torch.float32: 1e-4, torch.float64: 1e-10}

nequip/utils/unittests/conftest.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,11 @@
1212

1313
import torch
1414

15-
from nequip.utils.test import set_irreps_debug
15+
from nequip.utils.test import set_irreps_debug, FLOAT_TOLERANCE
1616
from nequip.data import AtomicData, ASEDataset
1717
from nequip.data.transforms import TypeMapper
1818
from nequip.utils.torch_geometric import Batch
1919
from nequip.utils._global_options import _set_global_options
20-
from nequip.utils.misc import dtype_from_name
2120

2221
# Sometimes we run parallel using pytest-xdist, and want to be able to use
2322
# as many GPUs as are available
@@ -42,12 +41,6 @@
4241
# Test parallelization, but don't waste time spawning tons of workers if lots of cores available
4342
os.environ["NEQUIP_NUM_TASKS"] = "2"
4443

45-
# The default float tolerance
46-
FLOAT_TOLERANCE = {
47-
t: torch.as_tensor(v, dtype=dtype_from_name(t))
48-
for t, v in {"float32": 1e-3, "float64": 1e-10}.items()
49-
}
50-
5144

5245
@pytest.fixture(scope="session", autouse=True, params=["float32", "float64"])
5346
def float_tolerance(request):

nequip/utils/unittests/model_tests.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,13 @@ def test_equivariance(self, model, atomic_batch, device):
228228
instance, out_fields = model
229229
instance = instance.to(device=device)
230230
atomic_batch = atomic_batch.to(device=device)
231-
assert_AtomicData_equivariant(func=instance, data_in=atomic_batch)
231+
assert_AtomicData_equivariant(
232+
func=instance,
233+
data_in=atomic_batch,
234+
e3_tolerance={torch.float32: 1e-3, torch.float64: 1e-8}[
235+
torch.get_default_dtype()
236+
],
237+
)
232238

233239
def test_embedding_cutoff(self, model, config, device):
234240
instance, out_fields = model

0 commit comments

Comments (0)