-
Notifications
You must be signed in to change notification settings - Fork 582
feat(jax): export call_lower to SavedModel via jax2tf #4254
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from 11 commits
Commits
Show all changes
17 commits
Select commit
Hold shift + click to select a range
d46d5f0
export savedmodel
njzjz 9c616cf
Merge branch 'devel' into savedmodel
njzjz ed1288e
move to a separate module
njzjz ecba709
refactor
njzjz e1d609e
fix bugs
njzjz 6dd3617
test
njzjz dc1de12
add an error message
njzjz 5a7fc4a
revert
njzjz 980b4a9
Update deepmd/jax/jax2tf/tfmodel.py
njzjz 8e216f5
use functions to store constants so it can be read by C++
njzjz 3e919a1
name functions
njzjz 60694a8
test test_io in a separate run
njzjz 15750a2
bump tensorflow
njzjz e2738cb
no-build-isolation
njzjz de0caa1
typo
njzjz 306eef3
fix eval argument in the test
njzjz 9a94118
change to openmpi
njzjz File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,11 @@ | ||
# SPDX-License-Identifier: LGPL-3.0-or-later
import tensorflow as tf

# jax2tf requires TensorFlow eager execution, but the DeePMD TensorFlow
# backend disables eager execution globally. TF offers no supported way to
# re-enable it temporarily once disabled, so fail fast at import time with
# an actionable message instead of crashing deep inside jax2tf later.
if not tf.executing_eagerly():
    # TF disallows temporary eager execution
    raise RuntimeError(
        "Unfortunately, jax2tf (requires eager execution) cannot be used with the "
        "TensorFlow backend (disables eager execution). "
        "If you are converting a model between different backends, "
        "consider converting to the `.dp` format first."
    )
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,172 @@ | ||
| # SPDX-License-Identifier: LGPL-3.0-or-later | ||
| import json | ||
|
|
||
| import tensorflow as tf | ||
| from jax.experimental import ( | ||
| jax2tf, | ||
| ) | ||
|
|
||
| from deepmd.jax.model.base_model import ( | ||
| BaseModel, | ||
| ) | ||
|
|
||
|
|
||
def deserialize_to_file(model_file: str, data: dict) -> None:
    """Deserialize the dictionary to a model file.

    When ``model_file`` ends with ``.savedmodel``, the JAX model is rebuilt
    from ``data``, its ``call_lower`` is converted to TensorFlow functions via
    ``jax2tf`` (one variant with and one without atomic virials), and the
    result is written as a TensorFlow SavedModel together with zero-argument
    ``tf.function`` wrappers exposing the model metadata. For any other
    suffix this function does nothing.

    Parameters
    ----------
    model_file : str
        The model file to be saved.
    data : dict
        The dictionary to be deserialized. Must contain the keys ``"model"``
        (a serialized model accepted by ``BaseModel.deserialize``) and
        ``"model_def_script"``.
    """
    if model_file.endswith(".savedmodel"):
        model = BaseModel.deserialize(data["model"])
        model_def_script = data["model_def_script"]
        call_lower = model.call_lower

        tf_model = tf.Module()

        # Build a TF-converted version of call_lower with do_atomic_virial
        # baked in, since jax2tf cannot trace through a Python bool keyword.
        # The polymorphic shapes keep nf/nloc/nghost symbolic so the exported
        # function accepts variable frame and atom counts; with_gradient=True
        # exports the custom gradient as well.
        def exported_whether_do_atomic_virial(do_atomic_virial):
            def call_lower_with_fixed_do_atomic_virial(
                coord, atype, nlist, mapping, fparam, aparam
            ):
                return call_lower(
                    coord,
                    atype,
                    nlist,
                    mapping,
                    fparam,
                    aparam,
                    do_atomic_virial=do_atomic_virial,
                )

            return jax2tf.convert(
                call_lower_with_fixed_do_atomic_virial,
                polymorphic_shapes=[
                    "(nf, nloc + nghost, 3)",
                    "(nf, nloc + nghost)",
                    f"(nf, nloc, {model.get_nnei()})",
                    "(nf, nloc + nghost)",
                    f"(nf, {model.get_dim_fparam()})",
                    f"(nf, nloc, {model.get_dim_aparam()})",
                ],
                with_gradient=True,
            )

        # Save a function that can take scalar inputs.
        # We need to explicitly set the function name, so C++ can find it.
        @tf.function(
            autograph=False,
            input_signature=[
                tf.TensorSpec([None, None, 3], tf.float64),
                tf.TensorSpec([None, None], tf.int32),
                tf.TensorSpec([None, None, model.get_nnei()], tf.int64),
                tf.TensorSpec([None, None], tf.int64),
                tf.TensorSpec([None, model.get_dim_fparam()], tf.float64),
                tf.TensorSpec([None, None, model.get_dim_aparam()], tf.float64),
            ],
        )
        def call_lower_without_atomic_virial(
            coord, atype, nlist, mapping, fparam, aparam
        ):
            return exported_whether_do_atomic_virial(do_atomic_virial=False)(
                coord, atype, nlist, mapping, fparam, aparam
            )

        tf_model.call_lower = call_lower_without_atomic_virial

        # Same signature as above; only do_atomic_virial differs.
        @tf.function(
            autograph=False,
            input_signature=[
                tf.TensorSpec([None, None, 3], tf.float64),
                tf.TensorSpec([None, None], tf.int32),
                tf.TensorSpec([None, None, model.get_nnei()], tf.int64),
                tf.TensorSpec([None, None], tf.int64),
                tf.TensorSpec([None, model.get_dim_fparam()], tf.float64),
                tf.TensorSpec([None, None, model.get_dim_aparam()], tf.float64),
            ],
        )
        def call_lower_with_atomic_virial(coord, atype, nlist, mapping, fparam, aparam):
            return exported_whether_do_atomic_virial(do_atomic_virial=True)(
                coord, atype, nlist, mapping, fparam, aparam
            )

        tf_model.call_lower_atomic_virial = call_lower_with_atomic_virial

        # Set functions to export other attributes. Constants are stored as
        # zero-argument tf.functions (rather than plain attributes) so the
        # C++ SavedModel loader can read them.
        @tf.function
        def get_type_map():
            return tf.constant(model.get_type_map(), dtype=tf.string)

        tf_model.get_type_map = get_type_map

        @tf.function
        def get_rcut():
            return tf.constant(model.get_rcut(), dtype=tf.double)

        tf_model.get_rcut = get_rcut

        @tf.function
        def get_dim_fparam():
            return tf.constant(model.get_dim_fparam(), dtype=tf.int64)

        tf_model.get_dim_fparam = get_dim_fparam

        @tf.function
        def get_dim_aparam():
            return tf.constant(model.get_dim_aparam(), dtype=tf.int64)

        tf_model.get_dim_aparam = get_dim_aparam

        @tf.function
        def get_sel_type():
            return tf.constant(model.get_sel_type(), dtype=tf.int64)

        tf_model.get_sel_type = get_sel_type

        @tf.function
        def is_aparam_nall():
            return tf.constant(model.is_aparam_nall(), dtype=tf.bool)

        tf_model.is_aparam_nall = is_aparam_nall

        @tf.function
        def model_output_type():
            return tf.constant(model.model_output_type(), dtype=tf.string)

        tf_model.model_output_type = model_output_type

        @tf.function
        def mixed_types():
            return tf.constant(model.mixed_types(), dtype=tf.bool)

        tf_model.mixed_types = mixed_types

        # get_min_nbor_dist may legitimately be absent (None); only export the
        # wrapper when a value exists, so tf.constant never receives None.
        if model.get_min_nbor_dist() is not None:

            @tf.function
            def get_min_nbor_dist():
                return tf.constant(model.get_min_nbor_dist(), dtype=tf.double)

            tf_model.get_min_nbor_dist = get_min_nbor_dist

        @tf.function
        def get_sel():
            return tf.constant(model.get_sel(), dtype=tf.int64)

        tf_model.get_sel = get_sel

        # The model definition script is exported as compact JSON text.
        @tf.function
        def get_model_def_script():
            return tf.constant(
                json.dumps(model_def_script, separators=(",", ":")), dtype=tf.string
            )

        tf_model.get_model_def_script = get_model_def_script
        # experimental_custom_gradients=True is required so the gradients
        # exported by jax2tf.convert(with_gradient=True) are saved too.
        tf.saved_model.save(
            tf_model,
            model_file,
            options=tf.saved_model.SaveOptions(experimental_custom_gradients=True),
        )
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.