Skip to content

Commit 8091b1f

Browse files
committed
Added a newtonnet config file to the repo in the tests directory.
1 parent 1e011ab commit 8091b1f

File tree

1 file changed

+66
-0
lines changed

1 file changed

+66
-0
lines changed

tests/config0.yml

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
---
# NewtonNet training configuration (Transition1x dataset).
# NOTE(review): nesting reconstructed from the section headers (general/data/
# model/training/hooks/checkpoint) — the scraped diff had lost all indentation,
# which made `model:` a duplicate top-level key. Confirm against the NewtonNet
# config schema.
general:
  me: /global/home/users/ericyuan/20230310_Transition1x/config0.yml  # path to this file
  device: ['cuda:0', 'cuda:1']  # cpu / cuda:0 / [cuda:0, cuda:1, cuda:2, cuda:3] / list of cuda devices
  driver: /global/home/users/ericyuan/NewtonNet/cli/newtonnet_train  # path to the run script
  output: [/global/home/users/ericyuan/20230310_Transition1x/output_final, 1]  # path and iterator for the output directory

data:
  train_path: /global/scratch/users/ericyuan/Transition1x/conformation_split_0aug/train_data.npz  # path to the training data
  val_path: /global/scratch/users/ericyuan/Transition1x/conformation_split_0aug/val_data.npz  # path to the validation data
  test_path: /global/scratch/users/ericyuan/Transition1x/conformation_split_0aug/test_data.npz  # path to the test data
  train_size: -1  # -1 for all
  test_size: -1
  val_size: -1
  cutoff: 5.0  # cutoff radius
  random_states: 90  # random seed for data splitting

model:
  pre_trained: /global/home/users/ericyuan/20230310_Transition1x/0.1-iv+ln-cont/training_1/models/best_model_state.tar  # path to the previously trained model for warm-up start
  activation: swish  # activation function: swish, ssp, relu, ...
  requires_dr: true  # if true, the derivative of the output is required
  w_energy: 1.0  # the weight of the energy loss in the loss function
  w_force: 20.0  # EDITED from 100 # the weight of the force loss in the loss function
  wf_decay: 0.0  # rate of exponential decay of the force weight by training epoch
  w_f_mag: 0.0  # the weight of the force-magnitude loss in the loss function
  lambda_l1: 0.0  # the coefficient of L1 regularization
  w_f_dir: 0.0  # the weight of the force-direction loss in the loss function
  resolution: 20  # number of basis functions that describe interatomic distances
  n_features: 128  # number of features
  max_z: 10  # maximum atomic number in the chemical systems
  n_interactions: 3  # number of interaction blocks of newtonnet
  cutoff_network: poly  # the cutoff function: poly (polynomial), cosine
  normalize_atomic: true  # EDITED from false # if true, the atomic energy is inverse-normalized; otherwise the total energy is scaled back
  shared_interactions: false  # if true, parameters of interaction blocks will be shared
  normalize_filter: false
  return_hessian: false  # if true, the hessian matrix will be returned
  double_update_latent: true
  layer_norm: true  # EDITED from false # normalize hidden layers with a 1D layer_norm function

training:
  epochs: 100  # number of times the entire training data will be shown to the model
  tr_batch_size: 100  # number of training points (snapshots) in a batch of data that is fed to the model
  val_batch_size: 100  # number of validation points (snapshots) in a batch of data that is fed to the model
  tr_rotations: 0  # number of times the training data needs to be randomly rotated (redundant for the NewtonNet model)
  val_rotations: 0  # number of times the validation data needs to be randomly rotated (redundant for the NewtonNet model)
  tr_frz_rot: false  # if true, a fixed rotation matrix will be used at each epoch
  val_frz_rot: false
  tr_keep_original: true  # if true, the original orientation of the data is preserved as part of the training set (besides other rotations)
  val_keep_original: true
  shuffle: true  # shuffle the training data before each epoch
  drop_last: true  # if true, drop the leftover data points that are less than a full batch
  lr: 1.0e-4  # learning rate
  lr_scheduler: [plateau, 15, 30, 0.7, 1.0e-6]  # learning rate decay based on the plateau algorithm: n_epoch_averaging, patience, decay_rate, stop_lr
  # lr_scheduler: [decay, 0.05]  # learning rate decay based on exponential decay: the rate of decay
  weight_decay: 1.0e-5  # the l2 norm
  dropout: 0.0  # dropout between 0 and 1

hooks:
  vismolvector3d: false  # if the latent force vectors need to be visualized (only works when return_latent is on)

checkpoint:
  log: 1  # log the results every this many epochs
  val: 1  # evaluate the performance on the validation set every this many epochs
  test: 10  # evaluate the performance on the test set every this many epochs
  model: 10  # save the model every this many epochs
  verbose: false  # verbosity of the logging

0 commit comments

Comments
 (0)