Skip to content

Commit 5b65df4

Browse files
committed
Merge remote-tracking branch 'origin/main' into main
2 parents cd3323b + 696e154 commit 5b65df4

File tree

2 files changed

+205
-0
lines changed

2 files changed

+205
-0
lines changed

examples/steady-state-poisson.py

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
import math
2+
import matplotlib.pyplot as plt
3+
import tensorflow as tf
4+
import tensordiffeq as tdq
5+
from tensordiffeq.boundaries import *
6+
from tensordiffeq.models import CollocationSolverND
7+
from tensorflow.math import sin
8+
from tensordiffeq.utils import constant
9+
10+
# Build a 2-D spatial domain on the unit square, 11 grid points per axis.
Domain = DomainND(["x", "y"])
for axis in ("x", "y"):
    Domain.add(axis, [0, 1.0], 11)

# Number of interior collocation points where the PDE residual is enforced.
N_f = 100
Domain.generate_collocation_points(N_f)
def f_model(u_model, x, y):
    """Strong-form residual of the steady Poisson problem u_xx + u_yy = f(x, y).

    Args:
        u_model: network mapping (x, y) -> u.
        x, y: collocation coordinates (column tensors).

    Returns:
        The PDE residual f_u, driven to zero during training.
    """
    pi = constant(math.pi)
    a1 = constant(1.0)
    a2 = constant(1.0)

    u = u_model(tf.concat([x, y], 1))
    # Second derivatives via nested first-order gradients.
    u_xx = tf.gradients(tf.gradients(u, x)[0], x)[0]
    u_yy = tf.gradients(tf.gradients(u, y)[0], y)[0]

    # We use this specific forcing term because an exact analytical solution
    # exists for it, which lets us benchmark the PINN result below.
    # Note: TensorFlow math primitives (sin, cos, ...) must be used here,
    # not Python/NumPy ones.
    forcing = -sin(a1 * pi * x) * sin(a2 * pi * y)

    return u_xx + u_yy - forcing  # residual == 0 at the true solution
def func_upper_x(y):
    """Dirichlet value along the upper x-boundary: -sin(pi*y) * sin(pi)."""
    pi = constant(math.pi)
    return -sin(pi * y) * sin(pi)
def func_upper_y(x):
    """Dirichlet value along the upper y-boundary: -sin(pi*x) * sin(pi)."""
    pi = constant(math.pi)
    return -sin(pi * x) * sin(pi)
# --- Boundary conditions -------------------------------------------------
# Homogeneous Dirichlet data on the lower edges; function-valued Dirichlet
# data on the upper edges.  NOTE(review): the original passed target="upper"
# for lower_x, which duplicated the upper x-edge and left the lower x-edge
# unconstrained — corrected to target="lower".
lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower")
upper_x = FunctionDirichletBC(Domain, fun=[func_upper_x], var='x', target="upper", func_inputs=["y"], n_values=10)
upper_y = FunctionDirichletBC(Domain, fun=[func_upper_y], var='y', target="upper", func_inputs=["x"], n_values=10)
lower_y = dirichletBC(Domain, val=0.0, var='y', target="lower")

BCs = [upper_x, lower_x, upper_y, lower_y]

# --- Model ---------------------------------------------------------------
layer_sizes = [2, 16, 16, 1]  # inputs (x, y) -> two hidden layers -> scalar u

model = CollocationSolverND()
model.compile(layer_sizes, f_model, Domain, BCs)
# `lr` is a deprecated alias in tf.keras optimizers; use `learning_rate`.
model.tf_optimizer = tf.keras.optimizers.Adam(learning_rate=.005)
model.fit(tf_iter=4000)

# --- Exact solution on the training grid ---------------------------------
nx, ny = (11, 11)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)

# Closed-form solution of u_xx + u_yy = -sin(pi x) sin(pi y).
Exact_u = (np.sin(math.pi * xv) * np.sin(math.pi * yv)) / (2 * math.pi ** 2)
u_star = Exact_u.flatten()[:, None]  # flattened column vector for comparisons

# --- Prediction & plotting -----------------------------------------------
x = Domain.domaindict[0]['xlinspace']
y = Domain.domaindict[1]["ylinspace"]
X, Y = np.meshgrid(x, y)
X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None]))

u_pred, f_u_pred = model.predict(X_star)

# error_u = tdq.helpers.find_L2_error(u_pred, u_star)
# print('Error u: %e' % (error_u))

U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, Y))
FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, Y))

# Domain bounds for plotting (defined once; the original duplicated these
# and mixed int/float literals in ub).
lb = np.array([0.0, 0.0])
ub = np.array([1.0, 1.0])

tdq.plotting.plot_solution_domain1D(model, [x, y], ub=ub, lb=lb, Exact_u=Exact_u)

examples/transfer-learn.py

Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
import scipy.io
2+
import math
3+
import tensorflow as tf
4+
import tensordiffeq as tdq
5+
from tensordiffeq.models import CollocationSolverND
6+
from tensordiffeq.boundaries import *
7+
8+
# Space-time domain for the Allen-Cahn problem: x in [-1, 1], t in [0, 1],
# with t flagged as the time variable.
Domain = DomainND(["x", "t"], time_var='t')
Domain.add("x", [-1.0, 1.0], 512)
Domain.add("t", [0.0, 1.0], 201)

# Interior collocation points where the PDE residual is minimized.
N_f = 50000
Domain.generate_collocation_points(N_f)
def func_ic(x):
    """Initial condition u(x, 0) = x^2 * cos(pi * x)."""
    pi = math.pi
    return np.cos(pi * x) * x ** 2
# Quantities matched across the two x-boundaries to enforce the periodic BC:
# the solution value and its first spatial derivative.
def deriv_model(u_model, x, t):
    """Return (u, u_x) evaluated at (x, t) for the periodic boundary."""
    u = u_model(tf.concat([x, t], 1))
    u_x = tf.gradients(u, x)[0]
    return u, u_x
# Initial condition at t = 0 plus periodicity in x.
init = IC(Domain, [func_ic], var=[['x']])
x_periodic = periodicBC(Domain, ['x'], [deriv_model])

BCs = [init, x_periodic]
def f_model(u_model, x, t):
    """Allen-Cahn residual: u_t - 0.0001 * u_xx + 5 * u^3 - 5 * u.

    Args:
        u_model: network mapping (x, t) -> u.
        x, t: collocation coordinates (column tensors).

    Returns:
        The PDE residual f_u, driven to zero during training.
    """
    u = u_model(tf.concat([x, t], 1))
    # tf.gradients returns a *list* of tensors; index [0] so the arithmetic
    # below operates on tensors.  (The original omitted the indexing here,
    # unlike every other gradient call in these examples.)
    u_x = tf.gradients(u, x)[0]
    u_xx = tf.gradients(u_x, x)[0]
    u_t = tf.gradients(u, t)[0]
    c1 = tdq.utils.constant(.0001)  # diffusion coefficient
    c2 = tdq.utils.constant(5.0)    # reaction coefficient
    f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
    return f_u
# Self-adaptive weights: per-collocation-point and per-IC-point loss weights
# trained jointly with the network (isAdaptive=True below).
col_weights = tf.Variable(tf.random.uniform([N_f, 1]), trainable=True, dtype=tf.float32)
u_weights = tf.Variable(100 * tf.random.uniform([512, 1]), trainable=True, dtype=tf.float32)

layer_sizes = [2, 128, 128, 128, 128, 1]

# Initial training run, then save a checkpoint to transfer-learn from.
model = CollocationSolverND()
model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights)
model.fit(tf_iter=5000)
model.save("test_model")


def _resume_training(learning_rate, tf_iter=5000):
    """Reload the saved checkpoint and continue training at a new learning rate.

    The model class must be re-initialized for transfer learning / resumed
    training to take effect, hence the fresh CollocationSolverND per call.
    Both the network optimizer and the adaptive-weight optimizer get the
    same learning rate, matching the original script.
    """
    m = CollocationSolverND()
    m.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights)
    m.tf_optimizer = tf.keras.optimizers.Adam(learning_rate)
    m.tf_optimizer_weights = tf.keras.optimizers.Adam(learning_rate)
    m.load_model("test_model")
    m.fit(tf_iter=tf_iter)
    return m


# Two transfer-learning rounds with a decaying learning rate; each reloads
# the "test_model" checkpoint, as in the original script.
model = _resume_training(.0001)
model = _resume_training(.00001)

# --- Error vs. high-fidelity reference -----------------------------------
# Load reference data for error calculation.  NOTE(review): 'AC.mat' must be
# present in the working directory.
data = scipy.io.loadmat('AC.mat')
Exact = data['uu']
Exact_u = np.real(Exact)

x = Domain.domaindict[0]['xlinspace']
t = Domain.domaindict[1]["tlinspace"]

# Mesh for prediction and plotting.
X, T = np.meshgrid(x, t)
X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
u_star = Exact_u.T.flatten()[:, None]

# Forward pass through the trained model.
u_pred, f_u_pred = model.predict(X_star)

error_u = tdq.helpers.find_L2_error(u_pred, u_star)
print('Error u: %e' % (error_u))

U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, T))
FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, T))

# Domain bounds for plotting (consistent float literals; was [1.0, 1]).
lb = np.array([-1.0, 0.0])
ub = np.array([1.0, 1.0])

tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u)

0 commit comments

Comments
 (0)