Commit eaad56c

Author: Samuel Burbulla (committed)
Wrap FNO from NVIDIA Modulus.
1 parent f07fe22 commit eaad56c

4 files changed: +130 -0 lines changed
continuiti/operators/modulus/__init__.py
Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
"""
`continuiti.operators.modulus`

Operators from NVIDIA Modulus wrapped in continuiti.
"""

# Test if we can import NVIDIA Modulus
try:
    import modulus  # noqa: F401
except ImportError:
    raise ImportError("NVIDIA Modulus not found!")

from .fno import FNO

__all__ = [
    "FNO",
]
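Because the subpackage raises an ImportError when NVIDIA Modulus is missing, downstream code can treat it as an optional dependency. A minimal sketch of that pattern (not part of the commit; the fallback to continuiti's own `FourierNeuralOperator` is an assumption and its keyword arguments differ from the Modulus wrapper):

try:
    from continuiti.operators.modulus import FNO
except ImportError:
    # Assumed fallback: continuiti's native Fourier operator.
    from continuiti.operators import FourierNeuralOperator as FNO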
continuiti/operators/modulus/fno.py
Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
"""
`continuiti.operators.modulus.fno`

The Fourier Neural Operator from NVIDIA Modulus wrapped in continuiti.
"""

import torch
from typing import Optional
from continuiti.operators import Operator, OperatorShapes
from modulus.models.fno import FNO as FNOModulus


class FNO(Operator):
    r"""FNO architecture from NVIDIA Modulus.

    The `in_channels` and `out_channels` arguments are determined by the
    `shapes` argument. The `dimension` is set to the dimension of the input
    coordinates, assuming that the grid dimension is the same as the coordinate
    dimension of `x`.

    All other keyword arguments are passed to the Fourier Neural Operator;
    please refer to the documentation of the `modulus.models.fno.FNO` class
    for more information.

    Args:
        shapes: Shapes of the input and output data.
        device: Device.
        dimension: Grid dimension of the FNO. Defaults to the coordinate dimension of `x`.
        **kwargs: Additional arguments for the Fourier layers.
    """

    def __init__(
        self,
        shapes: OperatorShapes,
        device: Optional[torch.device] = None,
        dimension: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(shapes, device)

        if dimension is None:
            # By default, use the coordinate dimension
            dimension = shapes.x.dim

        self.fno = FNOModulus(
            in_channels=shapes.u.dim,
            out_channels=shapes.v.dim,
            dimension=dimension,
            **kwargs,
        )
        self.fno.to(device)

    def forward(
        self, x: torch.Tensor, u: torch.Tensor, y: torch.Tensor
    ) -> torch.Tensor:
        r"""Forward pass of the Fourier Neural Operator.

        Args:
            x: Ignored.
            u: Input function values of shape (batch_size, u_dim, num_sensors...).
            y: Ignored.
        """
        return self.fno(u)
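As a usage sketch (not part of the commit): the wrapper can be driven entirely by a dataset's `OperatorShapes`. Calling the operator directly assumes `Operator` is a `torch.nn.Module`, and all other Modulus FNO hyperparameters are left at their defaults here:

from continuiti.benchmarks.sine import SineBenchmark
from continuiti.operators.modulus import FNO

dataset = SineBenchmark(n_train=1).train_dataset

# in_channels = shapes.u.dim, out_channels = shapes.v.dim, dimension = shapes.x.dim
operator = FNO(dataset.shapes)

# x and y are ignored; the prediction depends only on the sensor values u
v_pred = operator(dataset.x, dataset.u, dataset.y)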

tests/operators/modulus/__init__.py

Whitespace-only changes.
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
import pytest
from continuiti.benchmarks.sine import SineBenchmark
from continuiti.trainer import Trainer
from continuiti.operators.losses import MSELoss


@pytest.mark.slow
def test_modulus_fno():
    try:
        from continuiti.operators.modulus import FNO
    except ImportError:
        pytest.skip("NVIDIA Modulus not found!")

    # Data set
    benchmark = SineBenchmark(n_train=1)
    dataset = benchmark.train_dataset

    # Operator
    # Configured like the default continuiti `FourierNeuralOperator`
    # with depth=3 and width=3 as in `test_fno.py`.
    operator = FNO(
        dataset.shapes,
        decoder_layers=1,
        decoder_layer_size=1,
        decoder_activation_fn="identity",
        num_fno_layers=3,  # "depth" in FourierNeuralOperator
        latent_channels=3,  # "width" in FourierNeuralOperator
        num_fno_modes=dataset.shapes.u.size[0] // 2 + 1,
        padding=0,
        coord_features=False,
    )

    # Train
    Trainer(operator, device="cpu").fit(dataset, tol=1e-12, epochs=10_000)

    # Check solution
    x, u, y, v = dataset.x, dataset.u, dataset.y, dataset.v
    assert MSELoss()(operator, x, u, y, v) < 1e-12


# SineBenchmark(n_train=1024, n_sensors=128, n_evaluations=128), epochs=100

# NVIDIA Modulus FNO
# Parameters: 3560  Device: cpu
# Epoch 100/100  Step 32/32  [====================] 6ms/step [0:19min<0:00min] - loss/train = 6.3876e-05

# continuiti FNO
# Parameters: 3556  Device: cpu
# Epoch 100/100  Step 32/32  [====================] 3ms/step [0:10min<0:00min] - loss/train = 1.4440e-04

# -> continuiti FNO is 2x faster than NVIDIA Modulus FNO
# -> NVIDIA Modulus FNO cannot handle a different number of sensors and evaluations
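A sketch of how the timing comparison in the comments above could be reproduced (an assumption, not part of the commit): the `FourierNeuralOperator` import path and its `depth`/`width` arguments follow the comments in `test_modulus_fno`, and the exact Modulus FNO configuration behind the quoted parameter counts is not recorded here, so the Modulus wrapper is constructed with defaults.

from continuiti.benchmarks.sine import SineBenchmark
from continuiti.trainer import Trainer
from continuiti.operators import FourierNeuralOperator
from continuiti.operators.modulus import FNO

benchmark = SineBenchmark(n_train=1024, n_sensors=128, n_evaluations=128)
dataset = benchmark.train_dataset

operators = {
    "NVIDIA Modulus FNO": FNO(dataset.shapes),
    "continuiti FNO": FourierNeuralOperator(dataset.shapes, depth=3, width=3),
}

for name, operator in operators.items():
    # Same training loop for both implementations: 100 epochs on CPU.
    Trainer(operator, device="cpu").fit(dataset, epochs=100)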
