diff --git a/deepmd/pd/cxx_op.py b/deepmd/pd/cxx_op.py new file mode 100644 index 0000000000..9962bc0d24 --- /dev/null +++ b/deepmd/pd/cxx_op.py @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +from types import ( + ModuleType, +) + + +def load_library(module_name: str) -> tuple[bool, ModuleType]: + """Load OP library and return the module if success. + + Parameters + ---------- + module_name : str + Name of the module + + Returns + ------- + bool + Whether the library is loaded successfully + ModuleType + loaded custom operator module + """ + if importlib.util.find_spec(module_name) is not None: + module = importlib.import_module(module_name) + return True, module + + return False, None + + +ENABLE_CUSTOMIZED_OP, paddle_ops_deepmd = load_library("deepmd_op_pd") + +__all__ = [ + "ENABLE_CUSTOMIZED_OP", + "paddle_ops_deepmd", +] diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index 8d96c4e6f2..3ef075f359 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -368,12 +368,13 @@ def freeze( model.forward = paddle.jit.to_static( model.forward, input_spec=[ - InputSpec([1, -1, 3], dtype="float64", name="coord"), # coord - InputSpec([1, -1], dtype="int64", name="atype"), # atype - InputSpec([1, 9], dtype="float64", name="box"), # box + InputSpec([-1, -1, 3], dtype="float64", name="coord"), # coord + InputSpec([-1, -1], dtype="int64", name="atype"), # atype + InputSpec([-1, 9], dtype="float64", name="box"), # box None, # fparam None, # aparam - True, # do_atomic_virial + # InputSpec([], dtype="bool", name="do_atomic_virial"), # do_atomic_virial + False, # do_atomic_virial ], full_graph=True, ) @@ -388,14 +389,23 @@ def freeze( model.forward_lower = paddle.jit.to_static( model.forward_lower, input_spec=[ - InputSpec([1, -1, 3], dtype="float64", name="coord"), # extended_coord - InputSpec([1, -1], dtype="int32", name="atype"), # extended_atype - InputSpec([1, -1, -1], dtype="int32", name="nlist"), # nlist - InputSpec([1, -1], dtype="int64", name="mapping"), # mapping + InputSpec([-1, -1, 3], dtype="float64", name="coord"), # extended_coord + InputSpec([-1, -1], dtype="int32", name="atype"), # extended_atype + InputSpec([-1, -1, -1], dtype="int32", name="nlist"), # nlist + InputSpec([-1, -1], dtype="int64", name="mapping"), # mapping None, # fparam None, # aparam - True, # do_atomic_virial - None, # comm_dict + # InputSpec([], dtype="bool", name="do_atomic_virial"), # do_atomic_virial + False, # do_atomic_virial + ( + InputSpec([-1], "int64", name="send_list"), + InputSpec([-1], "int32", name="send_proc"), + InputSpec([-1], "int32", name="recv_proc"), + InputSpec([-1], "int32", name="send_num"), + InputSpec([-1], "int32", name="recv_num"), + InputSpec([-1], "int64", name="communicator"), + # InputSpec([1], "int64", name="has_spin"), + ), # comm_dict ], full_graph=True, ) diff --git a/deepmd/pd/model/atomic_model/base_atomic_model.py b/deepmd/pd/model/atomic_model/base_atomic_model.py index 1100813fb4..803964218a 100644 --- a/deepmd/pd/model/atomic_model/base_atomic_model.py +++ b/deepmd/pd/model/atomic_model/base_atomic_model.py @@ -1,9 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy import logging from typing import ( Callable, + NoReturn, Optional, Union, ) @@ -79,7 +79,8 @@ def __init__( pair_exclude_types: list[tuple[int, int]] = [], rcond: Optional[float] = None, preset_out_bias: Optional[dict[str, np.ndarray]] = None, - ): + data_stat_protect: float = 1e-2, + ) -> None: 
paddle.nn.Layer.__init__(self) BaseAtomicModel_.__init__(self) self.type_map = type_map @@ -87,8 +88,9 @@ def __init__( self.reinit_pair_exclude(pair_exclude_types) self.rcond = rcond self.preset_out_bias = preset_out_bias + self.data_stat_protect = data_stat_protect - def init_out_stat(self): + def init_out_stat(self) -> None: """Initialize the output bias.""" ntypes = self.get_ntypes() self.bias_keys: list[str] = list(self.fitting_output_def().keys()) @@ -104,7 +106,7 @@ def init_out_stat(self): def set_out_bias(self, out_bias: paddle.Tensor) -> None: self.out_bias = out_bias - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ["out_bias"]: self.out_bias = value elif key in ["out_std"]: @@ -124,10 +126,20 @@ def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map + def get_compute_stats_distinguish_types(self) -> bool: + """Get whether the fitting net computes stats which are not distinguished between different types of atoms.""" + return True + + def get_intensive(self) -> bool: + """Whether the fitting property is intensive.""" + return False + def reinit_atom_exclude( self, - exclude_types: list[int] = [], - ): + exclude_types: Optional[list[int]] = None, + ) -> None: + if exclude_types is None: + exclude_types = [] self.atom_exclude_types = exclude_types if exclude_types == []: self.atom_excl = None @@ -137,7 +149,7 @@ def reinit_atom_exclude( def reinit_pair_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.pair_exclude_types = exclude_types if exclude_types == []: self.pair_excl = None @@ -191,7 +203,7 @@ def forward_common_atomic( mapping: Optional[paddle.Tensor] = None, fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ) -> dict[str, paddle.Tensor]: """Common interface for atomic inference. @@ -232,7 +244,7 @@ def forward_common_atomic( if self.pair_excl is not None: pair_mask = self.pair_excl(nlist, extended_atype) # exclude neighbors in the nlist - nlist = paddle.where(pair_mask == 1, nlist, -1) + nlist = paddle.where(pair_mask == 1, nlist, paddle.full_like(nlist, -1)) ext_atom_mask = self.make_atom_mask(extended_atype) ret_dict = self.forward_atomic( @@ -274,7 +286,7 @@ def forward( mapping: Optional[paddle.Tensor] = None, fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ) -> dict[str, paddle.Tensor]: return self.forward_common_atomic( extended_coord, @@ -332,7 +344,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "BaseAtomicModel": - data = copy.deepcopy(data) + data = data.copy() variables = data.pop("@variables", None) variables = ( {"out_bias": None, "out_std": None} if variables is None else variables @@ -354,7 +366,7 @@ def compute_or_load_stat( self, merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, - ): + ) -> NoReturn: """ Compute the output statistics (e.g. energy bias) for the fitting net from packed data. @@ -377,7 +389,7 @@ def compute_or_load_out_stat( self, merged: Union[Callable[[], list[dict]], list[dict]], stat_file_path: Optional[DPPath] = None, - ): + ) -> None: """ Compute the output statistics (e.g. energy bias) for the fitting net from packed data. 
@@ -457,7 +469,6 @@ def change_out_bias( model_forward=self._get_forward_wrapper_func(), rcond=self.rcond, preset_bias=self.preset_out_bias, - atomic_output=self.atomic_output_def(), ) self._store_out_stat(delta_bias, out_std, add=True) elif bias_adjust_mode == "set-by-statistic": @@ -468,7 +479,8 @@ def change_out_bias( stat_file_path=stat_file_path, rcond=self.rcond, preset_bias=self.preset_out_bias, - atomic_output=self.atomic_output_def(), + stats_distinguish_types=self.get_compute_stats_distinguish_types(), + intensive=self.get_intensive(), ) self._store_out_stat(bias_out, std_out) else: @@ -544,7 +556,7 @@ def _store_out_stat( out_bias: dict[str, paddle.Tensor], out_std: dict[str, paddle.Tensor], add: bool = False, - ): + ) -> None: ntypes = self.get_ntypes() out_bias_data = paddle.clone(self.out_bias) out_std_data = paddle.clone(self.out_std) diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py index 1089b93a68..00fefa4e2b 100644 --- a/deepmd/pd/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -62,7 +62,9 @@ def __init__( self.fitting_net = fitting super().init_out_stat() self.enable_eval_descriptor_hook = False + self.enable_eval_fitting_last_layer_hook = False self.eval_descriptor_list = [] + self.eval_fitting_last_layer_list = [] # register 'type_map' as buffer def _string_to_array(s: str) -> list[int]: @@ -112,16 +114,29 @@ def _string_to_array(s: str) -> list[int]: self.buffer_aparam_nall.name = "buffer_aparam_nall" eval_descriptor_list: list[paddle.Tensor] + eval_fitting_last_layer_list: list[paddle.Tensor] def set_eval_descriptor_hook(self, enable: bool) -> None: """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" self.enable_eval_descriptor_hook = enable - self.eval_descriptor_list = [] + # = [] does not work; See #4533 + self.eval_descriptor_list.clear() def eval_descriptor(self) -> paddle.Tensor: """Evaluate the descriptor.""" return paddle.concat(self.eval_descriptor_list) + def set_eval_fitting_last_layer_hook(self, enable: bool) -> None: + """Set the hook for evaluating fitting last layer output and clear the cache for fitting last layer output list.""" + self.enable_eval_fitting_last_layer_hook = enable + self.fitting_net.set_return_middle_output(enable) + # = [] does not work; See #4533 + self.eval_fitting_last_layer_list.clear() + + def eval_fitting_last_layer(self) -> paddle.Tensor: + """Evaluate the fitting last layer output.""" + return paddle.concat(self.eval_fitting_last_layer_list) + def fitting_output_def(self) -> FittingOutputDef: """Get the output def of the fitting net.""" return ( @@ -250,7 +265,7 @@ def forward_atomic( mapping: Optional[paddle.Tensor] = None, fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ) -> dict[str, paddle.Tensor]: """Return atomic prediction. @@ -288,7 +303,7 @@ def forward_atomic( ) assert descriptor is not None if self.enable_eval_descriptor_hook: - self.eval_descriptor_list.append(descriptor) + self.eval_descriptor_list.append(descriptor.detach()) # energy, force fit_ret = self.fitting_net( descriptor, @@ -299,6 +314,13 @@ def forward_atomic( fparam=fparam, aparam=aparam, ) + if self.enable_eval_fitting_last_layer_hook: + assert "middle_output" in fit_ret, ( + "eval_fitting_last_layer not supported for this fitting net!" 
+ ) + self.eval_fitting_last_layer_list.append( + fit_ret.pop("middle_output").detach() + ) return fit_ret def get_out_bias(self) -> paddle.Tensor: @@ -343,6 +365,9 @@ def wrapped_sampler(): return sampled self.descriptor.compute_input_stats(wrapped_sampler, stat_file_path) + self.fitting_net.compute_input_stats( + wrapped_sampler, protection=self.data_stat_protect + ) self.compute_or_load_out_stat(wrapped_sampler, stat_file_path) def get_dim_fparam(self) -> int: diff --git a/deepmd/pd/model/descriptor/dpa1.py b/deepmd/pd/model/descriptor/dpa1.py index ad45c13d1d..7fd3de02e9 100644 --- a/deepmd/pd/model/descriptor/dpa1.py +++ b/deepmd/pd/model/descriptor/dpa1.py @@ -596,7 +596,7 @@ def forward( extended_atype: paddle.Tensor, nlist: paddle.Tensor, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): """Compute the descriptor. diff --git a/deepmd/pd/model/descriptor/dpa2.py b/deepmd/pd/model/descriptor/dpa2.py index 44b3229f66..ab3b6f5736 100644 --- a/deepmd/pd/model/descriptor/dpa2.py +++ b/deepmd/pd/model/descriptor/dpa2.py @@ -712,7 +712,7 @@ def forward( extended_atype: paddle.Tensor, nlist: paddle.Tensor, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): """Compute the descriptor. @@ -747,7 +747,7 @@ def forward( """ # cast the input to internal precsion - extended_coord = extended_coord.to(dtype=self.prec) + extended_coord = extended_coord.astype(dtype=self.prec) use_three_body = self.use_three_body nframes, nloc, nnei = nlist.shape @@ -798,14 +798,15 @@ def forward( assert self.tebd_transform is not None g1 = g1 + self.tebd_transform(g1_inp) # mapping g1 - if comm_dict is None: - assert mapping is not None + if comm_dict is None or len(comm_dict) == 0: + if paddle.in_dynamic_mode(): + assert mapping is not None mapping_ext = ( mapping.reshape([nframes, nall]) .unsqueeze(-1) .expand([-1, -1, g1.shape[-1]]) ) - g1_ext = paddle.take_along_axis(g1, mapping_ext, 1) + g1_ext = paddle.take_along_axis(g1, mapping_ext, 1, broadcast=False) g1 = g1_ext # repformer g1, g2, h2, rot_mat, sw = self.repformers( @@ -823,11 +824,11 @@ def forward( if self.concat_output_tebd: g1 = paddle.concat([g1, g1_inp], axis=-1) return ( - g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), - rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), - g2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), - h2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), - sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + g1.astype(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.astype(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + g2.astype(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + h2.astype(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + sw.astype(dtype=env.GLOBAL_PD_FLOAT_PRECISION), ) @classmethod diff --git a/deepmd/pd/model/descriptor/dpa3.py b/deepmd/pd/model/descriptor/dpa3.py index 654b328f76..7754de01e5 100644 --- a/deepmd/pd/model/descriptor/dpa3.py +++ b/deepmd/pd/model/descriptor/dpa3.py @@ -457,7 +457,7 @@ def forward( extended_atype: paddle.Tensor, nlist: paddle.Tensor, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): """Compute the descriptor. 
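Throughout the Paddle backend above, `comm_dict` changes from a string-keyed dict to a positional list of tensors: the `freeze()` InputSpec fixes the order, and the repflow/repformer blocks further down read the entries as `comm_dict[0]` … `comm_dict[5]`, with a seventh entry enabling the spin branch (`len(comm_dict) >= 7`). A minimal sketch of that layout, with placeholder tensors only — in practice the values come from the C++ border communication, not from Python:

```python
import paddle

# Positional layout implied by the InputSpec order in freeze(); the names are
# taken from that spec, the zero-size tensors are placeholders for this sketch.
comm_dict = [
    paddle.zeros([0], dtype="int64"),  # send_list
    paddle.zeros([0], dtype="int32"),  # send_proc
    paddle.zeros([0], dtype="int32"),  # recv_proc
    paddle.zeros([0], dtype="int32"),  # send_num
    paddle.zeros([0], dtype="int32"),  # recv_num
    paddle.zeros([1], dtype="int64"),  # communicator (opaque handle)
    # an optional 7th tensor marks a spin model (has_spin)
]
```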
diff --git a/deepmd/pd/model/descriptor/env_mat.py b/deepmd/pd/model/descriptor/env_mat.py index 214fb593de..2cc23fcadf 100644 --- a/deepmd/pd/model/descriptor/env_mat.py +++ b/deepmd/pd/model/descriptor/env_mat.py @@ -23,11 +23,11 @@ def _make_env_mat( nall = coord.shape[1] mask = nlist >= 0 # nlist = nlist * mask ## this impl will contribute nans in Hessian calculation. - nlist = paddle.where(mask, nlist, nall - 1) + nlist = paddle.where(mask, nlist, paddle.full_like(nlist, nall - 1)) coord_l = coord[:, :natoms].reshape([bsz, -1, 1, 3]) index = nlist.reshape([bsz, -1]).unsqueeze(-1).expand([-1, -1, 3]) coord_pad = paddle.concat([coord, coord[:, -1:, :] + rcut], axis=1) - coord_r = paddle.take_along_axis(coord_pad, axis=1, indices=index) + coord_r = paddle.take_along_axis(coord_pad, axis=1, indices=index, broadcast=False) coord_r = coord_r.reshape([bsz, natoms, nnei, 3]) diff = coord_r - coord_l length = paddle.linalg.norm(diff, axis=-1, keepdim=True) diff --git a/deepmd/pd/model/descriptor/repflows.py b/deepmd/pd/model/descriptor/repflows.py index 8d6855e16c..4ea72daf02 100644 --- a/deepmd/pd/model/descriptor/repflows.py +++ b/deepmd/pd/model/descriptor/repflows.py @@ -10,6 +10,10 @@ from deepmd.dpmodel.utils.seed import ( child_seed, ) +from deepmd.pd.cxx_op import ( + ENABLE_CUSTOMIZED_OP, + paddle_ops_deepmd, +) from deepmd.pd.model.descriptor.descriptor import ( DescriptorBlock, ) @@ -34,6 +38,9 @@ from deepmd.pd.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pd.utils.spin import ( + concat_switch_virtual, +) from deepmd.pd.utils.utils import ( ActivationFn, ) @@ -48,6 +55,29 @@ RepFlowLayer, ) +if not ENABLE_CUSTOMIZED_OP: + + def border_op( + argument0, + argument1, + argument2, + argument3, + argument4, + argument5, + argument6, + argument7, + argument8, + ) -> paddle.Tensor: + raise NotImplementedError( + "border_op is not available since customized Paddle OP library is not built when freezing the model. " + "See documentation for DPA3 for details." + ) + + # Note: this hack cannot actually save a model that can be run using LAMMPS. 
+ paddle_ops_deepmd_border_op = border_op +else: + paddle_ops_deepmd_border_op = paddle_ops_deepmd.border_op + @DescriptorBlock.register("se_repflow") class DescrptBlockRepflows(DescriptorBlock): @@ -413,17 +443,18 @@ def forward( extended_atype: paddle.Tensor, extended_atype_embd: Optional[paddle.Tensor] = None, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): parallel_mode = comm_dict is not None if not parallel_mode: - assert mapping is not None + if paddle.in_dynamic_mode(): + assert mapping is not None nframes, nloc, nnei = nlist.shape nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 atype = extended_atype[:, :nloc] # nb x nloc x nnei exclude_mask = self.emask(nlist, extended_atype) - nlist = paddle.where(exclude_mask != 0, nlist, -1) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) # nb x nloc x nnei x 4, nb x nloc x nnei x 3, nb x nloc x nnei x 1 dmatrix, diff, sw = prod_env_mat( extended_coord, @@ -446,7 +477,7 @@ def forward( :, :, : self.a_sel ] a_nlist = nlist[:, :, : self.a_sel] - a_nlist = paddle.where(a_dist_mask, a_nlist, -1) + a_nlist = paddle.where(a_dist_mask, a_nlist, paddle.full_like(a_nlist, -1)) _, a_diff, a_sw = prod_env_mat( extended_coord, a_nlist, @@ -496,7 +527,8 @@ def forward( angle_input = cosine_ij.unsqueeze(-1) / (paddle.pi**0.5) if not parallel_mode and self.use_loc_mapping: - assert mapping is not None + if paddle.in_dynamic_mode(): + assert mapping is not None # convert nlist from nall to nloc index nlist = paddle.take_along_axis( mapping, @@ -541,7 +573,8 @@ def forward( # nb x nall x n_dim if not parallel_mode: - assert mapping is not None + if paddle.in_dynamic_mode(): + assert mapping is not None mapping = ( mapping.reshape([nframes, nall]) .unsqueeze(-1) @@ -551,14 +584,95 @@ def forward( # node_ebd: nb x nloc x n_dim # node_ebd_ext: nb x nall x n_dim [OR] nb x nloc x n_dim when not parallel_mode if not parallel_mode: - assert mapping is not None + if paddle.in_dynamic_mode(): + assert mapping is not None node_ebd_ext = ( paddle.take_along_axis(node_ebd, mapping, 1, broadcast=False) if not self.use_loc_mapping else node_ebd ) else: - raise NotImplementedError("Not implemented") + assert len(comm_dict) >= 6 + has_spin = len(comm_dict) >= 7 + if not has_spin: + n_padding = nall - nloc + if paddle.in_dynamic_mode(): + node_ebd = paddle.nn.functional.pad( + node_ebd.squeeze(0), [0, 0, 0, n_padding], value=0.0 + ) + else: + _fill_shape = node_ebd.shape[1:] + _fill_shape[0] = n_padding + node_ebd = paddle.concat( + [ + node_ebd.squeeze(0), + paddle.zeros(_fill_shape, dtype=node_ebd.dtype), + ], + axis=0, + ) + # [nframes, nloc, tebd_dim] + real_nloc = nloc + real_nall = nall + else: + # for spin + real_nloc = nloc // 2 + real_nall = nall // 2 + real_n_padding = real_nall - real_nloc + node_ebd_real, node_ebd_virtual = paddle.split( + node_ebd, [real_nloc, real_nloc], axis=1 + ) + # mix_node_ebd: nb x real_nloc x (n_dim * 2) + mix_node_ebd = paddle.concat( + [node_ebd_real, node_ebd_virtual], axis=2 + ) + # nb x real_nall x (n_dim * 2) + if paddle.in_dynamic_mode(): + node_ebd = paddle.nn.functional.pad( + mix_node_ebd.squeeze(0), + (0, 0, 0, real_n_padding), + value=0.0, + ) + else: + _fill_shape = mix_node_ebd.shape[1:] + _fill_shape[0] = real_n_padding + node_ebd = paddle.concat( + [ + mix_node_ebd.squeeze(0), + paddle.zeros(_fill_shape, dtype=mix_node_ebd.dtype), + ], + axis=0, + ) + + assert len(comm_dict) >= 6 + 
ret = paddle_ops_deepmd_border_op( + comm_dict[0], + comm_dict[1], + comm_dict[2], + comm_dict[3], + comm_dict[4], + node_ebd, + comm_dict[5], + paddle.to_tensor( + real_nloc, + dtype=paddle.int32, + place=paddle.CPUPlace(), + ), # should be int of c++, placed on cpu + paddle.to_tensor( + real_nall - real_nloc, + dtype=paddle.int32, + place=paddle.CPUPlace(), + ), # should be int of c++, placed on cpu + ) + if not paddle.in_dynamic_mode(): + ret = paddle.assign(ret) + node_ebd_ext = ret.unsqueeze(0) + if has_spin: + node_ebd_real_ext, node_ebd_virtual_ext = paddle.split( + node_ebd_ext, [n_dim, n_dim], axis=2 + ) + node_ebd_ext = concat_switch_virtual( + node_ebd_real_ext, node_ebd_virtual_ext, real_nloc + ) node_ebd, edge_ebd, angle_ebd = ll.forward( node_ebd_ext, edge_ebd, diff --git a/deepmd/pd/model/descriptor/repformer_layer.py b/deepmd/pd/model/descriptor/repformer_layer.py index 4dad08fff8..fc66e1d6af 100644 --- a/deepmd/pd/model/descriptor/repformer_layer.py +++ b/deepmd/pd/model/descriptor/repformer_layer.py @@ -954,9 +954,7 @@ def _update_g1_conv( ).unsqueeze(-1) else: gg1 = _apply_switch(gg1, sw) - invnnei = (1.0 / float(nnei)) * paddle.ones( - (nb, nloc, 1), dtype=gg1.dtype - ).to(device=gg1.place) + invnnei = (1.0 / float(nnei)) * paddle.ones((nb, nloc, 1), dtype=gg1.dtype) if not self.g1_out_conv: # nb x nloc x ng2 g1_11 = paddle.sum(g2 * gg1, axis=2) * invnnei @@ -1026,11 +1024,10 @@ def _cal_hg( if not use_sqrt_nnei: invnnei = (1.0 / float(nnei)) * paddle.ones( (nb, nloc, 1, 1), dtype=g2.dtype - ).to(device=g2.place) + ) else: invnnei = paddle.rsqrt( - float(nnei) - * paddle.ones([nb, nloc, 1, 1], dtype=g2.dtype).to(device=g2.place) + float(nnei) * paddle.ones([nb, nloc, 1, 1], dtype=g2.dtype) ) # nb x nloc x 3 x ng2 h2g2 = paddle.matmul(paddle.transpose(h2, [0, 1, 3, 2]), g2) * invnnei diff --git a/deepmd/pd/model/descriptor/repformers.py b/deepmd/pd/model/descriptor/repformers.py index d2fab44c5e..09e9b51c83 100644 --- a/deepmd/pd/model/descriptor/repformers.py +++ b/deepmd/pd/model/descriptor/repformers.py @@ -10,6 +10,10 @@ from deepmd.dpmodel.utils.seed import ( child_seed, ) +from deepmd.pd.cxx_op import ( + ENABLE_CUSTOMIZED_OP, + paddle_ops_deepmd, +) from deepmd.pd.model.descriptor.descriptor import ( DescriptorBlock, ) @@ -31,6 +35,9 @@ from deepmd.pd.utils.exclude_mask import ( PairExcludeMask, ) +from deepmd.pd.utils.spin import ( + concat_switch_virtual, +) from deepmd.pd.utils.utils import ( ActivationFn, ) @@ -45,6 +52,29 @@ RepformerLayer, ) +if not ENABLE_CUSTOMIZED_OP: + + def border_op( + argument0, + argument1, + argument2, + argument3, + argument4, + argument5, + argument6, + argument7, + argument8, + ) -> paddle.Tensor: + raise NotImplementedError( + "border_op is not available since customized Paddle OP library is not built when freezing the model. " + "See documentation for DPA3 for details." + ) + + # Note: this hack cannot actually save a model that can be run using LAMMPS. + paddle_ops_deepmd_border_op = border_op +else: + paddle_ops_deepmd_border_op = paddle_ops_deepmd.border_op + @DescriptorBlock.register("se_repformer") @DescriptorBlock.register("se_uni") @@ -174,8 +204,8 @@ def __init__( The epsilon value for layer normalization. seed : int, optional Random seed for parameter initialization. 
- trainable : bool, default: True - Whether this block is trainable + trainable : bool + Whether the block is trainable """ super().__init__() self.rcut = float(rcut) @@ -378,9 +408,9 @@ def forward( extended_atype_embd: Optional[paddle.Tensor] = None, mapping: Optional[paddle.Tensor] = None, type_embedding: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): - if comm_dict is None: + if (comm_dict is None or len(comm_dict) == 0) and paddle.in_dynamic_mode(): assert mapping is not None assert extended_atype_embd is not None nframes, nloc, nnei = nlist.shape @@ -406,7 +436,7 @@ def forward( sw = sw.masked_fill(~nlist_mask, 0.0) # [nframes, nloc, tebd_dim] - if comm_dict is None: + if comm_dict is None or len(comm_dict) == 0: if paddle.in_dynamic_mode(): assert isinstance(extended_atype_embd, paddle.Tensor) # for jit atype_embd = extended_atype_embd[:, :nloc, :] @@ -414,8 +444,8 @@ def forward( assert list(atype_embd.shape) == [nframes, nloc, self.g1_dim] else: atype_embd = extended_atype_embd - if paddle.in_dynamic_mode(): - assert isinstance(atype_embd, paddle.Tensor) # for jit + if paddle.in_dynamic_mode(): + assert isinstance(atype_embd, paddle.Tensor) # for jit g1 = self.act(atype_embd) ng1 = g1.shape[-1] # nb x nloc x nnei x 1, nb x nloc x nnei x 3 @@ -432,8 +462,9 @@ def forward( # if the a neighbor is real or not is indicated by nlist_mask nlist[nlist == -1] = 0 # nb x nall x ng1 - if comm_dict is None: - assert mapping is not None + if comm_dict is None or len(comm_dict) == 0: + if paddle.in_dynamic_mode(): + assert mapping is not None mapping = ( mapping.reshape([nframes, nall]) .unsqueeze(-1) @@ -442,14 +473,91 @@ def forward( for idx, ll in enumerate(self.layers): # g1: nb x nloc x ng1 # g1_ext: nb x nall x ng1 - if comm_dict is None: - assert mapping is not None + if comm_dict is None or len(comm_dict) == 0: + if paddle.in_dynamic_mode(): + assert mapping is not None g1_ext = paddle.take_along_axis( g1, axis=1, indices=mapping, broadcast=False ) else: - raise NotImplementedError("Not implemented yet") - + has_spin = len(comm_dict) >= 7 + if not has_spin: + n_padding = nall - nloc + if paddle.in_dynamic_mode(): + g1 = paddle.nn.functional.pad( + g1.squeeze(0), + (0, 0, 0, n_padding), + value=0.0, + pad_from_left_axis=False, + ) + else: + _fill_shape = g1.shape[1:] + _fill_shape[0] = n_padding + g1 = paddle.concat( + [g1.squeeze(0), paddle.zeros(_fill_shape, dtype=g1.dtype)], + axis=0, + ) + real_nloc = nloc + real_nall = nall + else: + # for spin + real_nloc = nloc // 2 + real_nall = nall // 2 + real_n_padding = real_nall - real_nloc + g1_real, g1_virtual = paddle.split( + g1, [real_nloc, real_nloc], axis=1 + ) + # mix_g1: nb x real_nloc x (ng1 * 2) + mix_g1 = paddle.concat([g1_real, g1_virtual], axis=2) + # nb x real_nall x (ng1 * 2) + if paddle.in_dynamic_mode(): + g1 = paddle.nn.functional.pad( + mix_g1.squeeze(0), + (0, 0, 0, real_n_padding), + value=0.0, + pad_from_left_axis=False, + ) + else: + _fill_shape = mix_g1.shape[1:] + _fill_shape[0] = real_n_padding + g1 = paddle.concat( + [ + mix_g1.squeeze(0), + paddle.zeros(_fill_shape, dtype=mix_g1.dtype), + ], + axis=0, + ) + + assert len(comm_dict) >= 6 + ret = paddle_ops_deepmd_border_op( + comm_dict[0], + comm_dict[1], + comm_dict[2], + comm_dict[3], + comm_dict[4], + g1, + comm_dict[5], + paddle.to_tensor( + [real_nloc], + dtype=paddle.int32, + place=paddle.CPUPlace(), + ), # should be int of c++, placed on cpu + paddle.to_tensor( + 
[real_nall - real_nloc], + dtype=paddle.int32, + place=paddle.CPUPlace(), + ), # should be int of c++, placed on cpu + ) + if not paddle.in_dynamic_mode(): + ret = paddle.assign(ret) + g1_ext = ret.unsqueeze(0) + if has_spin: + g1_real_ext, g1_virtual_ext = paddle.split( + g1_ext, [ng1, ng1], axis=2 + ) + g1_ext = concat_switch_virtual( + g1_real_ext, g1_virtual_ext, real_nloc + ) g1, g2, h2 = ll.forward( g1_ext, g2, diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 9cd9f7b0b7..92c010fcf0 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -260,7 +260,7 @@ def forward( atype_ext: paddle.Tensor, nlist: paddle.Tensor, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): """Compute the descriptor. diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py index 788ab211a7..39c3e6ace4 100644 --- a/deepmd/pd/model/descriptor/se_atten.py +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -525,7 +525,10 @@ def forward( index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) # nb x (nloc x nnei) x nt atype_tebd_nlist = paddle.take_along_axis( - atype_tebd_ext, axis=1, indices=index + atype_tebd_ext, + axis=1, + indices=index, + broadcast=False, ) # j # nb x nloc x nnei x nt atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) @@ -563,14 +566,16 @@ def forward( nlist_index = nlist.reshape([nb, nloc * nnei]) # nf x (nl x nnei) nei_type = paddle.take_along_axis( - extended_atype, indices=nlist_index, axis=1 + extended_atype, indices=nlist_index, axis=1, broadcast=False ) # (nf x nl x nnei) x ng nei_type_index = nei_type.reshape([-1, 1]).expand([-1, ng]).to(paddle.int64) if self.type_one_side: tt_full = self.filter_layers_strip.networks[0](type_embedding) # (nf x nl x nnei) x ng - gg_t = paddle.take_along_axis(tt_full, indices=nei_type_index, axis=0) + gg_t = paddle.take_along_axis( + tt_full, indices=nei_type_index, axis=0, broadcast=False + ) else: idx_i = paddle.tile( atype.reshape([-1, 1]) * ntypes_with_padding, [1, nnei] @@ -594,7 +599,9 @@ def forward( ).reshape([-1, nt * 2]) tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) # (nf x nl x nnei) x ng - gg_t = paddle.take_along_axis(tt_full, axis=0, indices=idx) + gg_t = paddle.take_along_axis( + tt_full, axis=0, indices=idx, broadcast=False + ) # (nf x nl) x nnei x ng gg_t = gg_t.reshape([nfnl, nnei, ng]) if self.smooth: diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py index 3ebf62d7a5..16e4de5d12 100644 --- a/deepmd/pd/model/descriptor/se_t_tebd.py +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -415,7 +415,7 @@ def forward( extended_atype: paddle.Tensor, nlist: paddle.Tensor, mapping: Optional[paddle.Tensor] = None, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): """Compute the descriptor. 
@@ -845,7 +845,7 @@ def forward( # nb x (nloc x nnei) x nt # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) atype_tebd_nlist = paddle.take_along_axis( - atype_tebd_ext, axis=1, indices=index + atype_tebd_ext, axis=1, indices=index, broadcast=False ) # nb x nloc x nnei x nt atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) @@ -869,7 +869,7 @@ def forward( nlist_index = nlist.reshape([nb, nloc * nnei]) # nf x (nl x nnei) nei_type = paddle.take_along_axis( - extended_atype, indices=nlist_index, axis=1 + extended_atype, indices=nlist_index, axis=1, broadcast=False ) # nfnl x nnei nei_type = nei_type.reshape([nfnl, nnei]) @@ -902,7 +902,7 @@ def forward( ).reshape([-1, nt * 2]) tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) # (nfnl x nt_i x nt_j) x ng - gg_t = paddle.take_along_axis(tt_full, indices=idx, axis=0) + gg_t = paddle.take_along_axis(tt_full, indices=idx, axis=0, broadcast=False) # (nfnl x nt_i x nt_j) x ng gg_t = gg_t.reshape([nfnl, nnei, nnei, ng]) if self.smooth: diff --git a/deepmd/pd/model/model/ener_model.py b/deepmd/pd/model/model/ener_model.py index a5b1b9d4b3..2e28ae0765 100644 --- a/deepmd/pd/model/model/ener_model.py +++ b/deepmd/pd/model/model/ener_model.py @@ -81,6 +81,10 @@ def forward( model_predict["atom_virial"] = model_ret["energy_derv_c"].squeeze( -3 ) + else: + model_predict["atom_virial"] = paddle.zeros( + [model_predict["energy"].shape[0], 1, 9], dtype=paddle.float64 + ) else: model_predict["force"] = model_ret["dforce"] if "mask" in model_ret: @@ -99,7 +103,7 @@ def forward_lower( fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, ): model_ret = self.forward_common_lower( extended_coord, @@ -124,6 +128,10 @@ def forward_lower( model_predict["extended_virial"] = model_ret[ "energy_derv_c" ].squeeze(-3) + else: + model_predict["extended_virial"] = paddle.zeros( + [model_predict["energy"].shape[0], 1, 9], dtype=paddle.float64 + ) else: assert model_ret["dforce"] is not None model_predict["dforce"] = model_ret["dforce"] diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index acb237b5ac..077dac1160 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -238,7 +238,7 @@ def forward_common_lower( fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, do_atomic_virial: bool = False, - comm_dict: Optional[dict[str, paddle.Tensor]] = None, + comm_dict: Optional[list[paddle.Tensor]] = None, extra_nlist_sort: bool = False, ): """Return model prediction. 
Lower interface that takes @@ -364,7 +364,7 @@ def output_type_cast( continue if check_operation_applied(odef[kk], OutputVariableOperation.REDU): model_ret[kk] = ( - model_ret[kk].to(self.global_pd_ener_float_precision) + model_ret[kk].astype(self.global_pd_ener_float_precision) if model_ret[kk] is not None else None ) @@ -445,7 +445,7 @@ def _format_nlist( * paddle.ones( [n_nf, n_nloc, nnei - n_nnei], dtype=nlist.dtype, - ).to(nlist.place), + ), ], axis=-1, ) @@ -458,17 +458,21 @@ def _format_nlist( coord0 = extended_coord[:, :n_nloc, :] # nf x (nloc x nnei) x 3 index = nlist.reshape([n_nf, n_nloc * n_nnei, 1]).expand([-1, -1, 3]) - coord1 = paddle.take_along_axis(extended_coord, axis=1, indices=index) + coord1 = paddle.take_along_axis( + extended_coord, axis=1, indices=index, broadcast=False + ) # nf x nloc x nnei x 3 coord1 = coord1.reshape([n_nf, n_nloc, n_nnei, 3]) # nf x nloc x nnei rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) - rr = paddle.where(m_real_nei, rr, float("inf")) + rr = paddle.where(m_real_nei, rr, paddle.full_like(rr, float("inf"))) rr, nlist_mapping = ( paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1), ) - nlist = paddle.take_along_axis(nlist, axis=2, indices=nlist_mapping) + nlist = paddle.take_along_axis( + nlist, axis=2, indices=nlist_mapping, broadcast=False + ) nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist) nlist = nlist[..., :nnei] else: # not extra_nlist_sort and n_nnei <= nnei: diff --git a/deepmd/pd/model/model/transform_output.py b/deepmd/pd/model/model/transform_output.py index 469bfd3168..47004265c7 100644 --- a/deepmd/pd/model/model/transform_output.py +++ b/deepmd/pd/model/model/transform_output.py @@ -223,9 +223,7 @@ def communicate_extended_output( mapping = mapping.reshape(mldims + [1] * len(derv_r_ext_dims)).expand( [-1] * len(mldims) + derv_r_ext_dims ) - force = paddle.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype).to( - device=vv.place - ) + force = paddle.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype) # nf x nloc x nvar x 3 new_ret[kk_derv_r] = decomp.scatter_reduce( force, @@ -242,9 +240,7 @@ def communicate_extended_output( mapping, [1] * (len(mldims) + len(vdef.shape)) + [3], ) - virial = paddle.zeros(vldims + derv_c_ext_dims, dtype=vv.dtype).to( - device=vv.place - ) + virial = paddle.zeros(vldims + derv_c_ext_dims, dtype=vv.dtype) # nf x nloc x nvar x 9 new_ret[kk_derv_c] = decomp.scatter_reduce( virial, @@ -254,9 +250,9 @@ def communicate_extended_output( reduce="sum", ) new_ret[kk_derv_c + "_redu"] = paddle.sum( - new_ret[kk_derv_c].to(redu_prec), axis=1 + new_ret[kk_derv_c].astype(redu_prec), axis=1 ) - if not do_atomic_virial: + if not do_atomic_virial and paddle.in_dynamic_mode(): # pop atomic virial, because it is not correctly calculated. new_ret.pop(kk_derv_c) return new_ret diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index 320fc55eed..68053896d1 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -211,9 +211,7 @@ def forward(self, device: str): Type embedding network. 
""" if not self.use_econf_tebd: - embed = self.embedding_net( - paddle.eye(self.ntypes, dtype=self.prec).to(device=device) - ) + embed = self.embedding_net(paddle.eye(self.ntypes, dtype=self.prec)) else: assert self.econf_tebd is not None embed = self.embedding_net(self.econf_tebd.to(device)) @@ -221,9 +219,7 @@ def forward(self, device: str): embed = paddle.concat( [ embed, - paddle.zeros([1, embed.shape[1]], dtype=self.prec).to( - device=device - ), + paddle.zeros([1, embed.shape[1]], dtype=self.prec), ] ) return embed diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index a478c12f97..9dede6a897 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -4,6 +4,7 @@ abstractmethod, ) from typing import ( + Callable, Optional, Union, ) @@ -71,6 +72,93 @@ def share_params(self, base_class, shared_level, resume=False) -> None: else: raise NotImplementedError + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + protection: float = 1e-2, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the fittings from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + protection : float + Divided-by-zero protection + """ + if self.numb_fparam == 0 and self.numb_aparam == 0: + # skip data statistics + return + if callable(merged): + sampled = merged() + else: + sampled = merged + # stat fparam + if self.numb_fparam > 0: + cat_data = paddle.concat([frame["fparam"] for frame in sampled], axis=0) + cat_data = paddle.reshape(cat_data, [-1, self.numb_fparam]) + fparam_avg = paddle.mean(cat_data, axis=0) + fparam_std = paddle.std(cat_data, axis=0, unbiased=False) + fparam_std = paddle.where( + fparam_std < protection, + paddle.to_tensor(protection, dtype=fparam_std.dtype), + fparam_std, + ) + fparam_inv_std = 1.0 / fparam_std + paddle.assign( + paddle.to_tensor( + fparam_avg, place=env.DEVICE, dtype=self.fparam_avg.dtype + ), + self.fparam_avg, + ) + paddle.assign( + paddle.to_tensor( + fparam_inv_std, place=env.DEVICE, dtype=self.fparam_inv_std.dtype + ), + self.fparam_inv_std, + ) + # stat aparam + if self.numb_aparam > 0: + sys_sumv = [] + sys_sumv2 = [] + sys_sumn = [] + for ss_ in [frame["aparam"] for frame in sampled]: + ss = paddle.reshape(ss_, [-1, self.numb_aparam]) + sys_sumv.append(paddle.sum(ss, axis=0)) + sys_sumv2.append(paddle.sum(ss * ss, axis=0)) + sys_sumn.append(ss.shape[0]) + sumv = paddle.sum(paddle.stack(sys_sumv), axis=0) + sumv2 = paddle.sum(paddle.stack(sys_sumv2), axis=0) + sumn = sum(sys_sumn) + aparam_avg = sumv / sumn + aparam_std = paddle.sqrt(sumv2 / sumn - (sumv / sumn) ** 2) + aparam_std = paddle.where( + aparam_std < protection, + paddle.to_tensor( + protection, dtype=aparam_std.dtype, place=aparam_std.device + ), + aparam_std, + ) + aparam_inv_std = 1.0 / aparam_std + paddle.assign( + paddle.to_tensor( + aparam_avg, place=env.DEVICE, dtype=self.aparam_avg.dtype + ), + self.aparam_avg, + ) + paddle.assign( + paddle.to_tensor( + aparam_inv_std, place=env.DEVICE, dtype=self.aparam_inv_std.dtype + ), + 
self.aparam_inv_std, + ) + class GeneralFitting(Fitting): """Construct a general fitting net. @@ -246,6 +334,8 @@ def __init__( for param in self.parameters(): param.stop_gradient = not self.trainable + self.eval_return_middle_output = False + def reinit_exclude( self, exclude_types: list[int] = [], @@ -368,6 +458,9 @@ def set_case_embd(self, case_idx: int): case_idx ] + def set_return_middle_output(self, return_middle_output: bool = True) -> None: + self.eval_return_middle_output = return_middle_output + def __setitem__(self, key, value) -> None: if key in ["bias_atom_e"]: value = value.reshape([self.ntypes, self._net_out_dim()]) @@ -427,9 +520,9 @@ def _forward_common( aparam: Optional[paddle.Tensor] = None, ): # cast the input to internal precsion - xx = descriptor.to(self.prec) - fparam = fparam.to(self.prec) if fparam is not None else None - aparam = aparam.to(self.prec) if aparam is not None else None + xx = descriptor.astype(self.prec) + fparam = fparam.astype(self.prec) if fparam is not None else None + aparam = aparam.astype(self.prec) if aparam is not None else None if self.remove_vaccum_contribution is not None: # TODO: compute the input for vaccm when remove_vaccum_contribution is set @@ -514,15 +607,37 @@ def _forward_common( outs = paddle.zeros( (nf, nloc, net_dim_out), dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ).to(device=descriptor.place) + ) + results = {} + if self.mixed_types: - atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] + atom_property = self.filter_layers.networks[0](xx) + if self.eval_return_middle_output: + results["middle_output"] = self.filter_layers.networks[ + 0 + ].call_until_last(xx) if xx_zeros is not None: atom_property -= self.filter_layers.networks[0](xx_zeros) outs = ( - outs + atom_property + self.bias_atom_e[atype].to(self.prec) + outs + atom_property + self.bias_atom_e[atype].astype(self.prec) ) # Shape is [nframes, natoms[0], net_dim_out] else: + if self.eval_return_middle_output: + outs_middle = paddle.zeros( + (nf, nloc, self.neuron[-1]), + dtype=self.prec, + ).to(device=descriptor.place) # jit assertion + for type_i, ll in enumerate(self.filter_layers.networks): + mask = (atype == type_i).unsqueeze(-1) + mask = paddle.tile(mask, (1, 1, net_dim_out)) + middle_output_type = ll.call_until_last(xx) + middle_output_type = paddle.where( + paddle.tile(mask, (1, 1, self.neuron[-1])), + middle_output_type, + paddle.zeros_like(middle_output_type), + ) + outs_middle = outs_middle + middle_output_type + results["middle_output"] = outs_middle for type_i, ll in enumerate(self.filter_layers.networks): mask = (atype == type_i).unsqueeze(-1) mask.stop_gradient = True @@ -537,12 +652,15 @@ def _forward_common( ): atom_property -= ll(xx_zeros) atom_property = atom_property + self.bias_atom_e[type_i] - atom_property = paddle.where(mask, atom_property, 0.0) + atom_property = paddle.where( + mask, atom_property, paddle.full_like(atom_property, 0.0) + ) outs = ( outs + atom_property ) # Shape is [nframes, natoms[0], net_dim_out] # nf x nloc - mask = self.emask(atype).to("bool") + mask = self.emask(atype).astype("bool") # nf x nloc x nod - outs = paddle.where(mask[:, :, None], outs, 0.0) - return {self.var_name: outs.astype(env.GLOBAL_PD_FLOAT_PRECISION)} + outs = paddle.where(mask[:, :, None], outs, paddle.zeros_like(outs)) + results.update({self.var_name: outs}) + return results diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py index 29c9cc3501..cde8730c9a 100644 --- a/deepmd/pd/utils/exclude_mask.py +++ 
b/deepmd/pd/utils/exclude_mask.py @@ -58,7 +58,7 @@ def forward( """ nf, natom = atype.shape - return self.type_mask[atype].reshape([nf, natom]).to(atype.place) + return self.type_mask[atype].reshape([nf, natom]) class PairExcludeMask(paddle.nn.Layer): @@ -126,31 +126,25 @@ def forward( """ if self.no_exclusion: # safely return 1 if nothing is excluded. - return paddle.ones_like(nlist, dtype=paddle.int32).to(device=nlist.place) + return paddle.ones_like(nlist, dtype=paddle.int32) nf, nloc, nnei = nlist.shape nall = atype_ext.shape[1] # add virtual atom of type ntypes. nf x nall+1 ae = paddle.concat( [ atype_ext, - self.ntypes - * paddle.ones([nf, 1], dtype=atype_ext.dtype).to(atype_ext.place), + self.ntypes * paddle.ones([nf, 1], dtype=atype_ext.dtype), ], axis=-1, ) type_i = atype_ext[:, :nloc].reshape([nf, nloc]) * (self.ntypes + 1) # nf x nloc x nnei index = paddle.where(nlist == -1, nall, nlist).reshape([nf, nloc * nnei]) - type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( - [nf, nloc, nnei] - ) + type_j = paddle.take_along_axis( + ae, axis=1, indices=index, broadcast=False + ).reshape([nf, nloc, nnei]) type_ij = type_i[:, :, None] + type_j # nf x (nloc x nnei) type_ij = type_ij.reshape([nf, nloc * nnei]) - mask = ( - self.type_mask[type_ij] - .reshape([nf, nloc, nnei]) - .to(atype_ext.place) - .astype("bool") - ) + mask = self.type_mask[type_ij].reshape([nf, nloc, nnei]).astype("bool") return mask diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py index 707cbd125b..9157fba61a 100644 --- a/deepmd/pd/utils/nlist.py +++ b/deepmd/pd/utils/nlist.py @@ -94,36 +94,36 @@ def build_neighbor_list( """ batch_size = coord.shape[0] - coord = coord.reshape([batch_size, -1]) nall = coord.shape[1] // 3 # fill virtual atoms with large coords so they are not neighbors of any # real atom. - # NOTE: control flow with double backward is not supported well yet by paddle.jit if not paddle.in_dynamic_mode() or decomp.numel(coord) > 0: xmax = paddle.max(coord) + 2.0 * rcut else: xmax = paddle.zeros([], dtype=coord.dtype).to(device=coord.place) + 2.0 * rcut + coord_xyz = coord.reshape([batch_size, nall, 3]) # nf x nall is_vir = atype < 0 - coord1 = paddle.where( - is_vir[:, :, None], xmax, coord.reshape([batch_size, nall, 3]) - ).reshape([batch_size, nall * 3]) + # batch_size x nall x 3 + vcoord_xyz = paddle.where(is_vir.unsqueeze(2), xmax, coord_xyz) if isinstance(sel, int): sel = [sel] - # nloc x 3 - coord0 = coord1[:, : nloc * 3] - # nloc x nall x 3 - diff = coord1.reshape([batch_size, -1, 3]).unsqueeze(1) - coord0.reshape( - [batch_size, -1, 3] - ).unsqueeze(2) + + # Get the coordinates for the local atoms (first nloc atoms) + # batch_size x nloc x 3 + vcoord_local_xyz = vcoord_xyz[:, :nloc, :] + + # Calculate displacement vectors. 
+ diff = vcoord_xyz.unsqueeze(1) - vcoord_local_xyz.unsqueeze(2) if paddle.in_dynamic_mode(): assert list(diff.shape) == [batch_size, nloc, nall, 3] # nloc x nall rr = paddle.linalg.norm(diff, axis=-1) # if central atom has two zero distances, sorting sometimes can not exclude itself - rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).to(device=rr.place).unsqueeze(0) + rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).unsqueeze(0) rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1) + # nloc x (nall-1) rr = rr[:, :, 1:] nlist = nlist[:, :, 1:] @@ -155,20 +155,13 @@ def _trim_mask_distinguish_nlist( rr = paddle.concat( [ rr, - paddle.ones([batch_size, nloc, nsel - nnei]).to( - device=rr.place, dtype=rr.dtype - ) + paddle.ones([batch_size, nloc, nsel - nnei]).astype(dtype=rr.dtype) + rcut, ], axis=-1, ) nlist = paddle.concat( - [ - nlist, - paddle.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype).to( - device=rr.place - ), - ], + [nlist, paddle.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype)], axis=-1, ) if paddle.in_dynamic_mode(): @@ -318,7 +311,11 @@ def nlist_distinguish_types( paddle.argsort(pick_mask, axis=-1, descending=True, stable=True), ) # nloc x s(nsel) - inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) + inlist = paddle.take_along_axis( + nlist, + axis=2, + indices=imap, + ) inlist = inlist.masked_fill(~(pick_mask.to(paddle.bool)), -1) # nloc x nsel[ii] ret_nlist.append(paddle.split(inlist, [ss, snsel - ss], axis=-1)[0]) @@ -377,7 +374,7 @@ def build_multiple_neighbor_list( pad = -paddle.ones( [nb, nloc, nsels[-1] - nsel], dtype=nlist.dtype, - ).to(device=nlist.place) + ) # nb x nloc x nsel nlist = paddle.concat([nlist, pad], axis=-1) if paddle.is_tensor(nsel): @@ -399,9 +396,11 @@ def build_multiple_neighbor_list( .expand([-1, -1, 3]) ) # nb x nloc x nsel x 3 - coord2 = paddle.take_along_axis(coord1, axis=1, indices=index).reshape( - [nb, nloc, nsel, 3] - ) + coord2 = paddle.take_along_axis( + coord1, + axis=1, + indices=index, + ).reshape([nb, nloc, nsel, 3]) # nb x nloc x nsel x 3 diff = coord2 - coord0[:, :, None, :] # nb x nloc x nsel @@ -452,7 +451,7 @@ def extend_coord_with_ghosts( device = coord.place nf, nloc = atype.shape[:2] # int64 for index - aidx = paddle.tile(paddle.arange(nloc).to(device=device).unsqueeze(0), [nf, 1]) # pylint: disable=no-explicit-dtype + aidx = paddle.tile(paddle.arange(nloc).unsqueeze(0), [nf, 1]) # pylint: disable=no-explicit-dtype if cell is None: nall = nloc extend_coord = coord.clone() @@ -496,14 +495,12 @@ def extend_coord_with_ghosts( # .cpu() ) # pylint: disable=no-explicit-dtype eye_3 = ( - paddle.eye(3, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( - dtype=env.GLOBAL_PD_FLOAT_PRECISION - ) + paddle.eye(3) # .cpu() - ) - xyz = xi.reshape([-1, 1, 1, 1]) * eye_3[0] - xyz = xyz + yi.reshape([1, -1, 1, 1]) * eye_3[1] - xyz = xyz + zi.reshape([1, 1, -1, 1]) * eye_3[2] + ).to(dtype=env.GLOBAL_PD_FLOAT_PRECISION) + xyz = xi.reshape([-1, 1, 1, 1]).astype(eye_3.dtype) * eye_3[0] + xyz = xyz + yi.reshape([1, -1, 1, 1]).astype(eye_3.dtype) * eye_3[1] + xyz = xyz + zi.reshape([1, 1, -1, 1]).astype(eye_3.dtype) * eye_3[2] xyz = xyz.reshape([-1, 3]) # xyz = xyz.to(device=device) # ns x 3 @@ -519,7 +516,7 @@ def extend_coord_with_ghosts( # nf x ns x nloc extend_aidx = paddle.tile(aidx.unsqueeze(-2), [1, ns, 1]) return ( - extend_coord.reshape([nf, nall * 3]).to(device), - extend_atype.reshape([nf, nall]).to(device), - extend_aidx.reshape([nf, nall]).to(device), + extend_coord.reshape([nf, nall * 3]), + 
extend_atype.reshape([nf, nall]), + extend_aidx.reshape([nf, nall]), ) diff --git a/deepmd/pd/utils/serialization.py b/deepmd/pd/utils/serialization.py index f4206ce993..bd70deb75c 100644 --- a/deepmd/pd/utils/serialization.py +++ b/deepmd/pd/utils/serialization.py @@ -69,9 +69,9 @@ def deserialize_to_file(model_file: str, data: dict) -> None: model.forward, full_graph=True, input_spec=[ - InputSpec([1, -1, 3], dtype="float64", name="coord"), - InputSpec([1, -1], dtype="int64", name="atype"), - InputSpec([1, 9], dtype="float64", name="box"), + InputSpec([-1, -1, 3], dtype="float64", name="coord"), + InputSpec([-1, -1], dtype="int64", name="atype"), + InputSpec([-1, 9], dtype="float64", name="box"), None, None, True, @@ -88,9 +88,9 @@ def deserialize_to_file(model_file: str, data: dict) -> None: model.forward_lower, full_graph=True, input_spec=[ - InputSpec([1, -1, 3], dtype="float64", name="coord"), - InputSpec([1, -1], dtype="int32", name="atype"), - InputSpec([1, -1, -1], dtype="int32", name="nlist"), + InputSpec([-1, -1, 3], dtype="float64", name="coord"), + InputSpec([-1, -1], dtype="int32", name="atype"), + InputSpec([-1, -1, -1], dtype="int32", name="nlist"), None, None, None, @@ -101,4 +101,5 @@ def deserialize_to_file(model_file: str, data: dict) -> None: paddle.jit.save( model, model_file.split(".json")[0], + skip_prune_program=True, ) diff --git a/deepmd/pd/utils/stat.py b/deepmd/pd/utils/stat.py index e0abb1b289..ca9fa96703 100644 --- a/deepmd/pd/utils/stat.py +++ b/deepmd/pd/utils/stat.py @@ -12,9 +12,6 @@ import numpy as np import paddle -from deepmd.dpmodel.output_def import ( - FittingOutputDef, -) from deepmd.pd.utils import ( AtomExcludeMask, ) @@ -27,6 +24,7 @@ to_paddle_tensor, ) from deepmd.utils.out_stat import ( + compute_stats_do_not_distinguish_types, compute_stats_from_atomic, compute_stats_from_redu, ) @@ -117,7 +115,7 @@ def _save_to_file( stat_file_path: DPPath, bias_out: dict, std_out: dict, -): +) -> None: assert stat_file_path is not None stat_file_path.mkdir(exist_ok=True, parents=True) for kk, vv in bias_out.items(): @@ -136,6 +134,8 @@ def _post_process_stat( For global statistics, we do not have the std for each type of atoms, thus fake the output std by ones for all the types. + If the shape of out_std is already the same as out_bias, + we do not need to do anything. """ new_std = {} @@ -242,7 +242,8 @@ def compute_output_stats( rcond: Optional[float] = None, preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, model_forward: Optional[Callable[..., paddle.Tensor]] = None, - atomic_output: Optional[FittingOutputDef] = None, + stats_distinguish_types: bool = True, + intensive: bool = False, ): """ Compute the output statistics (e.g. energy bias) for the fitting net from packed data. @@ -272,8 +273,10 @@ def compute_output_stats( If not None, the model will be utilized to generate the original energy prediction, which will be subtracted from the energy label of the data. The difference will then be used to calculate the delta complement energy bias for each type. - atomic_output : FittingOutputDef, optional - The output of atomic model. + stats_distinguish_types : bool, optional + Whether to distinguish different element types in the statistics. + intensive : bool, optional + Whether the fitting target is intensive. 
""" # try to restore the bias from stat file bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys) @@ -362,7 +365,8 @@ def compute_output_stats( rcond, preset_bias, model_pred_g, - atomic_output, + stats_distinguish_types, + intensive, ) bias_atom_a, std_atom_a = compute_output_stats_atomic( sampled, @@ -405,7 +409,8 @@ def compute_output_stats_global( rcond: Optional[float] = None, preset_bias: Optional[dict[str, list[Optional[paddle.Tensor]]]] = None, model_pred: Optional[dict[str, np.ndarray]] = None, - atomic_output: Optional[FittingOutputDef] = None, + stats_distinguish_types: bool = True, + intensive: bool = False, ): """This function only handle stat computation from reduced global labels.""" # return directly if model predict is empty for global @@ -469,26 +474,31 @@ def compute_output_stats_global( # subtract the model bias and output the delta bias stats_input = { - kk: merged_output[kk] - model_pred[kk] for kk in keys if kk in merged_output + kk: merged_output[kk] - model_pred[kk].reshape(merged_output[kk].shape) + for kk in keys + if kk in merged_output } bias_atom_e = {} std_atom_e = {} for kk in keys: if kk in stats_input: - if atomic_output is not None and atomic_output.get_data()[kk].intensive: - task_dim = stats_input[kk].shape[1] - assert merged_natoms[kk].shape == (nf[kk], ntypes) - stats_input[kk] = ( - merged_natoms[kk].sum(axis=1).reshape([-1, 1]) * stats_input[kk] + if not stats_distinguish_types: + bias_atom_e[kk], std_atom_e[kk] = ( + compute_stats_do_not_distinguish_types( + stats_input[kk], + merged_natoms[kk], + assigned_bias=assigned_atom_ener[kk], + intensive=intensive, + ) + ) + else: + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( + stats_input[kk], + merged_natoms[kk], + assigned_bias=assigned_atom_ener[kk], + rcond=rcond, ) - assert stats_input[kk].shape == (nf[kk], task_dim) - bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( - stats_input[kk], - merged_natoms[kk], - assigned_bias=assigned_atom_ener[kk], - rcond=rcond, - ) else: # this key does not have global labels, skip it. continue @@ -515,10 +525,10 @@ def rmse(x): for kk in bias_atom_e.keys(): rmse_ae = rmse( ( - unbias_e[kk].reshape([nf[kk], -1]).astype(merged_output[kk].dtype) + unbias_e[kk].reshape([nf[kk], -1]) - merged_output[kk].reshape([nf[kk], -1]) ) - / atom_numbs[kk][:, None].astype(merged_output[kk].dtype) + / atom_numbs[kk][:, None] ) log.info( f"RMSE of {kk} per atom after linear regression is: {rmse_ae} in the unit of {kk}." 
@@ -549,7 +559,17 @@ def compute_output_stats_atomic( ] for kk in keys } - # shape: (nframes, nloc, ndim) + # reshape outputs [nframes, nloc * ndim] --> reshape to [nframes * nloc, 1, ndim] for concatenation + # reshape natoms [nframes, nloc] --> reshape to [nframes * nolc, 1] for concatenation + natoms = {k: [sys_v.reshape([-1, 1]) for sys_v in v] for k, v in natoms.items()} + outputs = { + k: [ + sys.reshape([natoms[k][sys_idx].shape[0], 1, -1]) + for sys_idx, sys in enumerate(v) + ] + for k, v in outputs.items() + } + merged_output = { kk: to_numpy_array(paddle.concat(outputs[kk])) for kk in keys diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index a678802a35..7e0f180e26 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -520,13 +520,16 @@ if(DEEPMD_C_ROOT) endif() if(NOT DEEPMD_C_ROOT) + add_subdirectory(lib/) if(ENABLE_TENSORFLOW) add_subdirectory(op/tf/) endif() if(ENABLE_PYTORCH) add_subdirectory(op/pt/) endif() - add_subdirectory(lib/) + if(ENABLE_PADDLE) + add_subdirectory(op/pd/) + endif() endif() if(BUILD_PY_IF) add_subdirectory(config/) diff --git a/source/api_cc/CMakeLists.txt b/source/api_cc/CMakeLists.txt index 0d4bba1047..90b7c08449 100644 --- a/source/api_cc/CMakeLists.txt +++ b/source/api_cc/CMakeLists.txt @@ -6,7 +6,6 @@ file(GLOB LIB_SRC src/*.cc src/*.cpp) file(GLOB INC_SRC include/*.h ${CMAKE_CURRENT_BINARY_DIR}/version.h) set(libname "${LIB_DEEPMD_CC}") - add_library(${libname} SHARED ${LIB_SRC}) # link: libdeepmd libdeepmd_op libtensorflow_cc libtensorflow_framework @@ -48,7 +47,7 @@ set_target_properties( ${libname} PROPERTIES INSTALL_RPATH "$ORIGIN;${BACKEND_LIBRARY_PATH}" INSTALL_RPATH_USE_LINK_PATH TRUE - BUILD_RPATH "$ORIGIN/../op/tf;$ORIGIN/../op/pt") + BUILD_RPATH "$ORIGIN/../op/tf;$ORIGIN/../op/pt;$ORIGIN/../op/pd") target_compile_definitions(${libname} PRIVATE TF_PRIVATE) if(CMAKE_TESTING_ENABLED) target_link_libraries(${libname} PRIVATE coverage_config) diff --git a/source/api_cc/include/DeepPotPD.h b/source/api_cc/include/DeepPotPD.h index ec43300ca0..5908f4003e 100644 --- a/source/api_cc/include/DeepPotPD.h +++ b/source/api_cc/include/DeepPotPD.h @@ -392,7 +392,7 @@ class DeepPotPD : public DeepPotBackend { int do_message_passing; // 1:dpa2 model 0:others bool gpu_enabled; std::unique_ptr firstneigh_tensor; - // std::unordered_map comm_dict; # Not used yet + std::unique_ptr mapping_tensor; }; } // namespace deepmd diff --git a/source/api_cc/src/DeepPotPD.cc b/source/api_cc/src/DeepPotPD.cc index 3a3d880c4b..21a1254b67 100644 --- a/source/api_cc/src/DeepPotPD.cc +++ b/source/api_cc/src/DeepPotPD.cc @@ -11,6 +11,159 @@ using namespace deepmd; +#include +#include +#include +#include +#include + +class Logger { + public: + enum Level { DEBUG = 0, INFO = 1, WARNING = 2, ERROR = 3 }; + + private: + static Level minLevel; + static bool colorEnabled; + static bool showTimestamp; + + static const char* getColorCode(Level level) { + if (!colorEnabled) { + return ""; + } + switch (level) { + case DEBUG: + return "\033[1;36m"; + case INFO: + return "\033[1;32m"; + case WARNING: + return "\033[1;33m"; + case ERROR: + return "\033[1;31m"; + default: + return ""; + } + } + + static const char* getResetCode() { return colorEnabled ? 
"\033[0m" : ""; } + + static const char* getLevelName(Level level) { + switch (level) { + case DEBUG: + return "DEBUG"; + case INFO: + return "INFO"; + case WARNING: + return "WARNING"; + case ERROR: + return "ERROR"; + default: + return "UNKNOWN"; + } + } + + static std::string getCurrentTime() { + if (!showTimestamp) { + return ""; + } + + std::time_t now = std::time(0); + std::tm* ltm = std::localtime(&now); + + std::ostringstream oss; + oss << std::setfill('0') << std::setw(4) << (1900 + ltm->tm_year) << "-" + << std::setw(2) << (1 + ltm->tm_mon) << "-" << std::setw(2) + << ltm->tm_mday << " " << std::setw(2) << ltm->tm_hour << ":" + << std::setw(2) << ltm->tm_min << ":" << std::setw(2) << ltm->tm_sec; + return oss.str(); + } + + public: + class LogStream { + private: + std::ostringstream oss; + Level level; + bool shouldLog; + + public: + LogStream(Level lvl) : level(lvl), shouldLog(lvl >= minLevel) { + if (shouldLog) { + std::string timestamp = getCurrentTime(); + if (!timestamp.empty()) { + oss << "[" << timestamp << "] "; + } + oss << getColorCode(level) << "[" << getLevelName(level) << "]" + << getResetCode() << " "; + } + } + + ~LogStream() { + if (shouldLog) { + std::cout << oss.str() << std::flush; + } + } + + template + LogStream& operator<<(const T& value) { + if (shouldLog) { + oss << value; + } + return *this; + } + + LogStream& operator<<(std::ostream& (*manip)(std::ostream&)) { + if (shouldLog) { + oss << manip; + } + return *this; + } + + LogStream(const LogStream&) = delete; + LogStream& operator=(const LogStream&) = delete; + LogStream(LogStream&& other) noexcept + : oss(std::move(other.oss)), + level(other.level), + shouldLog(other.shouldLog) {} + + LogStream& operator=(LogStream&& other) noexcept { + if (this != &other) { + oss = std::move(other.oss); + level = other.level; + shouldLog = other.shouldLog; + } + return *this; + } + }; + + static void setLevel(Level level) { minLevel = level; } + static void enableColor(bool enable = true) { colorEnabled = enable; } + static void enableTimestamp(bool enable = true) { showTimestamp = enable; } + static Level getLevel() { return minLevel; } + static bool isColorEnabled() { return colorEnabled; } + static bool isTimestampEnabled() { return showTimestamp; } + + static LogStream debug() { return LogStream(DEBUG); } + static LogStream info() { return LogStream(INFO); } + static LogStream warning() { return LogStream(WARNING); } + static LogStream error() { return LogStream(ERROR); } +}; + +Logger::Level Logger::minLevel = Logger::INFO; +bool Logger::colorEnabled = true; +bool Logger::showTimestamp = true; + +namespace logg { +inline Logger::LogStream debug() { return Logger::debug(); } +inline Logger::LogStream info() { return Logger::info(); } +inline Logger::LogStream warning() { return Logger::warning(); } +inline Logger::LogStream error() { return Logger::error(); } + +inline void setLevel(Logger::Level level) { Logger::setLevel(level); } +inline void enableColor(bool enable = true) { Logger::enableColor(enable); } +inline void enableTimestamp(bool enable = true) { + Logger::enableTimestamp(enable); +} +} // namespace logg + std::vector createNlistTensorPD( const std::vector>& data) { std::vector ret; @@ -41,9 +194,7 @@ void DeepPotPD::init(const std::string& model, << std::endl; return; } - // NOTE: There is no custom operators need to be loaded now. - // deepmd::load_op_library(); - + deepmd::load_op_library(); // NOTE: Only support 1 GPU now. 
int gpu_num = 1; if (gpu_num > 0) { @@ -59,6 +210,7 @@ void DeepPotPD::init(const std::string& model, config->EnableNewIR(true); config->EnableCustomPasses({"add_shadow_output_after_dead_parameter_pass"}, true); + // config->SwitchIrOptim(false); // initialize inference config_fl config_fl = std::make_shared(); @@ -67,6 +219,7 @@ void DeepPotPD::init(const std::string& model, config_fl->EnableNewIR(true); config_fl->EnableCustomPasses({"add_shadow_output_after_dead_parameter_pass"}, true); + // config_fl->SwitchIrOptim(false); // loading inference model std::string pdmodel_path, fl_pdmodel_path; @@ -113,30 +266,31 @@ void DeepPotPD::init(const std::string& model, if (!gpu_enabled) { config->DisableGpu(); config_fl->DisableGpu(); - std::cout << "load model from: " << model << " to cpu " << std::endl; + logg::info() << "load model from: " << model << " to cpu " << std::endl; } else { config->EnableUseGpu(4096, 0); config_fl->EnableUseGpu(4096, 0); - std::cout << "load model from: " << model << " to gpu:" << gpu_id - << std::endl; + logg::info() << "load model from: " << model << " to gpu:" << gpu_id + << std::endl; } if (config->cinn_enabled()) { - std::cout << "model.forward will be compiled with cinn." << std::endl; + logg::info() << "model.forward will be compiled with cinn." << std::endl; } else { - std::cout << "NOTE: You can try: \n'export FLAGS_prim_all=true" - " FLAGS_enable_pir_in_executor=1" - " FLAGS_prim_enable_dynamic=true FLAGS_use_cinn=true'\n" - "to speed up C++ inference with paddle backend" - << std::endl; + logg::info() << "NOTE: You can try: \n'export FLAGS_prim_all=true" + " FLAGS_enable_pir_in_executor=1" + " FLAGS_prim_enable_dynamic=true FLAGS_use_cinn=true' " + "to speed up C++ inference with paddle backend" + << std::endl; } if (config_fl->cinn_enabled()) { - std::cout << "model.forward_lower will be compiled with cinn." << std::endl; + logg::info() << "model.forward_lower will be compiled with cinn." + << std::endl; } else { - std::cout << "NOTE: You can try: \n'export FLAGS_prim_all=true" - " FLAGS_enable_pir_in_executor=1" - " FLAGS_prim_enable_dynamic=true FLAGS_use_cinn=true'\n" - "to speed up C++ inference with paddle backend" - << std::endl; + logg::info() << "NOTE: You can try: \n'export FLAGS_prim_all=true" + " FLAGS_enable_pir_in_executor=1" + " FLAGS_prim_enable_dynamic=true FLAGS_use_cinn=true' " + "to speed up C++ inference with paddle backend" + << std::endl; } // NOTE: Both set to 1 now. 
@@ -151,16 +305,42 @@ void DeepPotPD::init(const std::string& model, } predictor = paddle_infer::CreatePredictor(*config); + logg::info() << "Setup model.forward model" << std::endl; predictor_fl = paddle_infer::CreatePredictor(*config_fl); + logg::info() << "Setup model.forward_lower" << std::endl; + auto print_handle_names = [](const std::vector& name_vec) { + int n = name_vec.size(); + std::string ret; + for (int i = 0; i < n; ++i) { + ret += "[" + std::to_string(i) + "]" + name_vec[i] + " \n"[i == n - 1]; + } + logg::debug() << ret; + }; + logg::debug() << "Input names of model.forward below:" << std::endl; + print_handle_names(predictor->GetInputNames()); + logg::debug() << "Output names of model.forward below:" << std::endl; + print_handle_names(predictor->GetOutputNames()); + std::cout << std::endl; + logg::debug() << "Input names of model.forward_lower below:" << std::endl; + print_handle_names(predictor_fl->GetInputNames()); + logg::debug() << "Output names of model.forward_lower below:" << std::endl; + print_handle_names(predictor_fl->GetOutputNames()); // initialize hyper params from model buffers ntypes_spin = 0; DeepPotPD::get_buffer("buffer_has_message_passing", do_message_passing); + logg::debug() << "buffer_has_message_passing = " << this->do_message_passing + << std::endl; DeepPotPD::get_buffer("buffer_rcut", rcut); + logg::debug() << "buffer_rcut = " << this->rcut << std::endl; DeepPotPD::get_buffer("buffer_ntypes", ntypes); + logg::debug() << "buffer_ntypes = " << this->ntypes << std::endl; DeepPotPD::get_buffer("buffer_dfparam", dfparam); + logg::debug() << "buffer_dfparam = " << this->dfparam << std::endl; DeepPotPD::get_buffer("buffer_daparam", daparam); + logg::debug() << "buffer_daparam = " << this->daparam << std::endl; DeepPotPD::get_buffer("buffer_aparam_nall", aparam_nall); + logg::debug() << "buffer_aparam_nall = " << this->aparam_nall << std::endl; inited = true; } DeepPotPD::~DeepPotPD() {} @@ -205,51 +385,81 @@ void DeepPotPD::compute(ENERGYVTYPE& ener, nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); if (do_message_passing == 1 && nghost > 0) { - throw deepmd::deepmd_exception( - "(do_message_passing == 1 && nghost > 0) is not supported yet."); - // int nswap = lmp_list.nswap; - // auto sendproc_tensor = predictor_fl->GetInputHandle("sendproc"); - // sendproc_tensor->Reshape({nswap}); - // sendproc_tensor->CopyFromCpu(lmp_list.sendproc); - // auto recvproc_tensor = predictor_fl->GetInputHandle("recvproc"); - // recvproc_tensor->Reshape({nswap}); - // recvproc_tensor->CopyFromCpu(lmp_list.recvproc); - // auto firstrecv_tensor = predictor_fl->GetInputHandle("firstrecv"); - // firstrecv_tensor->Reshape({nswap}); - // firstrecv_tensor->CopyFromCpu(lmp_list.firstrecv); - // auto recvnum_tensor = predictor_fl->GetInputHandle("recvnum"); - // recvnum_tensor->Reshape({nswap}); - // recvnum_tensor->CopyFromCpu(lmp_list.recvnum); - // auto sendnum_tensor = predictor_fl->GetInputHandle("sendnum"); - // sendnum_tensor->Reshape({nswap}); - // sendnum_tensor->CopyFromCpu(lmp_list.sendnum); - // auto communicator_tensor = - // predictor_fl->GetInputHandle("communicator"); - // communicator_tensor->Reshape({1}); - // communicator_tensor->CopyFromCpu(static_cast(lmp_list.world)); - // auto sendlist_tensor = predictor_fl->GetInputHandle("sendlist"); - - // int total_send = - // std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); + auto sendproc_tensor = predictor_fl->GetInputHandle("send_proc"); + auto recvproc_tensor = 
predictor_fl->GetInputHandle("recv_proc"); + auto recvnum_tensor = predictor_fl->GetInputHandle("recv_num"); + auto sendnum_tensor = predictor_fl->GetInputHandle("send_num"); + auto communicator_tensor = predictor_fl->GetInputHandle("communicator"); + auto sendlist_tensor = predictor_fl->GetInputHandle("send_list"); + + int nswap = lmp_list.nswap; + sendproc_tensor->Reshape({nswap}); + sendproc_tensor->CopyFromCpu(lmp_list.sendproc); + + recvproc_tensor->Reshape({nswap}); + recvproc_tensor->CopyFromCpu(lmp_list.recvproc); + + recvnum_tensor->Reshape({nswap}); + recvnum_tensor->CopyFromCpu(lmp_list.recvnum); + + sendnum_tensor->Reshape({nswap}); + if (sizeof(lmp_list.sendnum[0]) != sizeof(int32_t)) { + std::vector temp_data(nswap); + for (int i = 0; i < nswap; i++) { + temp_data[i] = static_cast(lmp_list.sendnum[i]); + } + sendnum_tensor->CopyFromCpu(temp_data.data()); + } else { + sendnum_tensor->CopyFromCpu(lmp_list.sendnum); + } + communicator_tensor->Reshape({1}); + if (lmp_list.world) { + communicator_tensor->CopyFromCpu(static_cast(lmp_list.world)); + } + + assert(sizeof(std::intptr_t) == 8); + int total_send = + std::accumulate(lmp_list.sendnum, lmp_list.sendnum + nswap, 0); + sendlist_tensor->Reshape({total_send}); + + /** + ** NOTE: paddle do not support construct a Tensor with from_blob(T**, ...) + ** from a double pointer, so we convert int* pointer to indptr_t for each + ** entry and wrap it into int64 Tensor as a workaround. + */ + std::vector pointer_addresses; + pointer_addresses.reserve(nswap); + for (int iswap = 0; iswap < nswap; ++iswap) { + std::intptr_t addr = + reinterpret_cast(lmp_list.sendlist[iswap]); + pointer_addresses.push_back(addr); + } + sendlist_tensor->CopyFromCpu(pointer_addresses.data()); } - if (do_message_passing == 1 && nghost == 0) { - throw deepmd::deepmd_exception( - "(do_message_passing == 1 && nghost == 0) is not supported yet."); + if (lmp_list.mapping) { + std::vector mapping(nall_real); + for (size_t ii = 0; ii < nall_real; ii++) { + mapping[ii] = lmp_list.mapping[fwd_map[ii]]; + } + this->mapping_tensor = predictor_fl->GetInputHandle("mapping"); + this->mapping_tensor->Reshape({1, nall_real}); + this->mapping_tensor->CopyFromCpu(mapping.data()); } } std::vector firstneigh = createNlistTensorPD(nlist_data.jlist); - firstneigh_tensor = predictor_fl->GetInputHandle("nlist"); - firstneigh_tensor->Reshape({1, nloc, (int)firstneigh.size() / (int)nloc}); - firstneigh_tensor->CopyFromCpu(firstneigh.data()); + this->firstneigh_tensor = predictor_fl->GetInputHandle("nlist"); + this->firstneigh_tensor->Reshape( + {1, nloc, (int)firstneigh.size() / (int)nloc}); + this->firstneigh_tensor->CopyFromCpu(firstneigh.data()); bool do_atom_virial_tensor = atomic; - std::unique_ptr fparam_tensor; if (!fparam.empty()) { + std::unique_ptr fparam_tensor; fparam_tensor = predictor_fl->GetInputHandle("fparam"); fparam_tensor->Reshape({1, static_cast(fparam.size())}); - fparam_tensor->CopyFromCpu((fparam.data())); + fparam_tensor->CopyFromCpu(fparam.data()); } - std::unique_ptr aparam_tensor; if (!aparam_.empty()) { + std::unique_ptr aparam_tensor; aparam_tensor = predictor_fl->GetInputHandle("aparam"); aparam_tensor->Reshape( {1, lmp_list.inum, static_cast(aparam_.size()) / lmp_list.inum}); diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index 70755c901a..eace577f89 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -415,6 +415,9 @@ void deepmd::load_op_library() { #endif #ifdef BUILD_PYTORCH 
_load_single_op_library("deepmd_op_pt"); +#endif +#ifdef BUILD_PADDLE + _load_single_op_library("deepmd_op_pd"); #endif // load customized plugins const char* env_customized_plugins = std::getenv("DP_PLUGIN_PATH"); @@ -1419,7 +1422,9 @@ deepmd::DPBackend deepmd::get_backend(const std::string& model) { model.substr(model.length() - 11) == ".savedmodel") { return deepmd::DPBackend::JAX; } else if ((model.length() >= 5 && - model.substr(model.length() - 5) == ".json")) { + model.substr(model.length() - 5) == ".json") || + (model.length() >= 8 && + model.substr(model.length() - 8) == ".pdmodel")) { return deepmd::DPBackend::Paddle; } throw deepmd::deepmd_exception("Unsupported model file format"); diff --git a/source/op/pd/CMakeLists.txt b/source/op/pd/CMakeLists.txt new file mode 100644 index 0000000000..194ceb4061 --- /dev/null +++ b/source/op/pd/CMakeLists.txt @@ -0,0 +1,72 @@ +file(GLOB OP_SRC comm.cc) + +add_library(deepmd_op_pd SHARED ${OP_SRC}) + +if(NOT DEFINED PADDLE_INFERENCE_DIR) + message( + FATAL_ERROR + "please set PADDLE_INFERENCE_DIR with -DPADDLE_INFERENCE_DIR=/path/to/paddle_inference" + ) +endif() + +set(PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH + ${PADDLE_INFERENCE_DIR}/third_party/install) +include_directories(${PADDLE_INFERENCE_DIR}) +include_directories(${PADDLE_INFERENCE_DIR}/paddle/include) +include_directories(${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/protobuf/include) +include_directories(${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/glog/include) +include_directories(${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/gflags/include) +include_directories(${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/xxhash/include) + +set(PADDLE_INFER_LIB ${PADDLE_INFERENCE_DIR}/paddle/lib/libpaddle_inference.so) +set(MKLML_LIB + ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/mklml/lib/libmklml_intel.so + ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/mklml/lib/libiomp5.so) + +target_link_libraries(deepmd_op_pd PRIVATE ${LIB_DEEPMD}) + +if(APPLE) + set_target_properties(deepmd_op_pd PROPERTIES INSTALL_RPATH "@loader_path") +else() + set_target_properties(deepmd_op_pd PROPERTIES INSTALL_RPATH "$ORIGIN") +endif() + +find_package(MPI) +if(MPI_FOUND) + include(CheckCXXSymbolExists) + set(CMAKE_REQUIRED_INCLUDES ${MPI_CXX_INCLUDE_DIRS}) + set(CMAKE_REQUIRED_LIBRARIES ${MPI_CXX_LIBRARIES}) + check_cxx_symbol_exists(MPIX_Query_cuda_support "mpi.h" CUDA_AWARE) + if(NOT CUDA_AWARE) + check_cxx_symbol_exists(MPIX_Query_cuda_support "mpi.h;mpi-ext.h" OMP_CUDA) + if(NOT OMP_CUDA) + target_compile_definitions(deepmd_op_pd PRIVATE NO_CUDA_AWARE) + endif() + endif() + target_link_libraries(deepmd_op_pd PRIVATE MPI::MPI_CXX) + target_compile_definitions(deepmd_op_pd PRIVATE USE_MPI) +endif() + +if(CMAKE_TESTING_ENABLED) + target_link_libraries(deepmd_op_pd PRIVATE coverage_config) +endif() + +target_link_libraries( + deepmd_op_pd + PRIVATE ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/glog/lib/libglog.a + ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/gflags/lib/libgflags.a + ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/xxhash/lib/libxxhash.a + ${PADDLE_INFERENCE_DIR_THIRD_PARTY_PATH}/protobuf/lib/libprotobuf.a + ${PADDLE_INFER_LIB} + ${MKLML_LIB} + dl + pthread) + +if(BUILD_PY_IF) + install(TARGETS deepmd_op_pd DESTINATION deepmd/lib/) +else(BUILD_PY_IF) + install( + TARGETS deepmd_op_pd + EXPORT DeePMDTargets + DESTINATION lib/) +endif(BUILD_PY_IF) diff --git a/source/op/pd/comm.cc b/source/op/pd/comm.cc new file mode 100644 index 0000000000..548e5db83a --- /dev/null +++ b/source/op/pd/comm.cc @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: 
LGPL-3.0-or-later + +#ifdef USE_MPI +#include +#ifdef OMPI_MPI_H +#include +#endif +#endif +#include + +#include "paddle/extension.h" + +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) +#include "device.h" +#endif + +#ifdef USE_MPI +template +static MPI_Datatype get_mpi_type(); + +template <> +MPI_Datatype get_mpi_type() { + return MPI_FLOAT; +} + +template <> +MPI_Datatype get_mpi_type() { + return MPI_DOUBLE; +} +#endif + +#ifdef USE_MPI +static void unpack_communicator(const paddle::Tensor& communicator_tensor, + MPI_Comm& mpi_comm) { +#ifdef OMPI_MPI_H + const int64_t* communicator = communicator_tensor.data(); +#else + const int64_t* ptr = communicator_tensor.data(); + const int* communicator = reinterpret_cast(ptr); +#endif + mpi_comm = reinterpret_cast(*communicator); +} +#endif + +template +void Border_forward_t(const paddle::Tensor& sendlist_tensor, + const paddle::Tensor& sendproc_tensor, + const paddle::Tensor& recvproc_tensor, + const paddle::Tensor& sendnum_tensor, + const paddle::Tensor& recvnum_tensor, + paddle::Tensor& g1, + const paddle::Tensor& communicator_tensor, + const paddle::Tensor& nlocal_tensor, + const paddle::Tensor& nghost_tensor) { + int64_t send_list_len = sendlist_tensor.numel(); + + paddle::Tensor cpu_sendlist = paddle::empty( + {send_list_len}, paddle::DataType::INT64, paddle::CPUPlace()); + cpu_sendlist.copy_(sendlist_tensor, paddle::CPUPlace(), true); + int64_t* sendlist = cpu_sendlist.data(); + + int nswap = sendproc_tensor.dims()[0]; + + paddle::Tensor cpu_sendproc = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_sendproc.copy_(sendproc_tensor, paddle::CPUPlace(), true); + int* sendproc = cpu_sendproc.data(); + + paddle::Tensor cpu_recvproc = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_recvproc.copy_(recvproc_tensor, paddle::CPUPlace(), true); + int* recvproc = cpu_recvproc.data(); + + paddle::Tensor cpu_sendnum = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_sendnum.copy_(sendnum_tensor, paddle::CPUPlace(), true); + int* sendnum = cpu_sendnum.data(); + + paddle::Tensor cpu_recvnum = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_recvnum.copy_(recvnum_tensor, paddle::CPUPlace(), true); + int* recvnum = cpu_recvnum.data(); + + int tensor_size = g1.dims()[1]; + + paddle::Tensor cpu_nlocal = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_nlocal.copy_(nlocal_tensor, paddle::CPUPlace(), true); + int nlocal = *(cpu_nlocal.data()); + + paddle::Tensor cpu_nghost = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_nghost.copy_(nghost_tensor, paddle::CPUPlace(), true); + int nghost = *(cpu_nghost.data()); + + int ntotal = nlocal + nghost; + + paddle::Tensor recv_g1_tensor = g1; + +#ifdef USE_MPI + // MPI initialization check + int mpi_init = 0; + MPI_Initialized(&mpi_init); + int cuda_aware = 1; + int me = 0; + MPI_Comm world; + int world_size = 0; + + if (mpi_init) { + unpack_communicator(communicator_tensor, world); + MPI_Comm_rank(world, &me); + MPI_Comm_size(world, &world_size); + } + + MPI_Datatype mpi_type = get_mpi_type(); + MPI_Request request; + +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + if (world_size >= 1) { + int version, subversion; + MPI_Get_version(&version, &subversion); + if (version >= 4) { +#ifdef NO_CUDA_AWARE + cuda_aware = 0; +#else + cuda_aware = MPIX_Query_cuda_support(); +#endif + } else { + cuda_aware = 0; + } + + if (cuda_aware == 
0) { + recv_g1_tensor = paddle::empty_like(g1, g1.dtype(), paddle::CPUPlace()); + recv_g1_tensor.copy_(g1, recv_g1_tensor.place(), true); + } + } +#endif + +#endif // USE_MPI + FPTYPE* recv_g1 = recv_g1_tensor.data() + nlocal * tensor_size; + + for (int iswap = 0; iswap < nswap; ++iswap) { + int nrecv = recvnum[iswap]; + int nsend = sendnum[iswap]; + paddle::Tensor isendlist; + paddle::Tensor send_g1_tensor; + FPTYPE* send_g1 = nullptr; + + if (nsend != 0) { + std::intptr_t addr = static_cast(sendlist[iswap]); + int* isendlist_ptr = reinterpret_cast(addr); + isendlist = + paddle::from_blob(isendlist_ptr, {nsend}, paddle::DataType::INT32, + phi::DataLayout::NCHW, paddle::CPUPlace()) + .copy_to(recv_g1_tensor.place(), true); + send_g1_tensor = + paddle::experimental::index_select(recv_g1_tensor, isendlist, 0); + send_g1 = send_g1_tensor.data(); + } + +#ifdef USE_MPI + if (sendproc[iswap] != me) { + if (nrecv) { + MPI_Irecv(recv_g1, nrecv * tensor_size, mpi_type, recvproc[iswap], 0, + world, &request); + } + if (nsend) { + MPI_Send(send_g1, nsend * tensor_size, mpi_type, sendproc[iswap], 0, + world); + } + if (nrecv) { + MPI_Wait(&request, MPI_STATUS_IGNORE); + } + } else { +#endif + +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) +#ifdef USE_MPI + if (cuda_aware == 0) { + memcpy(recv_g1, send_g1, + (unsigned long)nsend * tensor_size * sizeof(FPTYPE)); + } else { + gpuMemcpy(recv_g1, send_g1, + (unsigned long)nsend * tensor_size * sizeof(FPTYPE), + gpuMemcpyDeviceToDevice); + } +#else + gpuMemcpy(recv_g1, send_g1, + (unsigned long)nsend * tensor_size * sizeof(FPTYPE), + gpuMemcpyDeviceToDevice); +#endif + +#else + memcpy(recv_g1, send_g1, + (unsigned long)nsend * tensor_size * sizeof(FPTYPE)); +#endif + +#ifdef USE_MPI + } +#endif + recv_g1 += nrecv * tensor_size; + } + +#ifdef USE_MPI +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + if (cuda_aware == 0) { + g1.copy_(recv_g1_tensor, g1.place(), true); + } +#endif +#endif +} + +void Border_forward(const paddle::Tensor& sendlist_tensor, + const paddle::Tensor& sendproc_tensor, + const paddle::Tensor& recvproc_tensor, + const paddle::Tensor& sendnum_tensor, + const paddle::Tensor& recvnum_tensor, + paddle::Tensor& g1_tensor, + const paddle::Tensor& communicator_tensor, + const paddle::Tensor& nlocal_tensor, + const paddle::Tensor& nghost_tensor) { + bool type_flag = (g1_tensor.dtype() == phi::DataType::FLOAT64) ? 
true : false; + if (type_flag) { + Border_forward_t(sendlist_tensor, sendproc_tensor, recvproc_tensor, + sendnum_tensor, recvnum_tensor, g1_tensor, + communicator_tensor, nlocal_tensor, nghost_tensor); + } else { + Border_forward_t(sendlist_tensor, sendproc_tensor, recvproc_tensor, + sendnum_tensor, recvnum_tensor, g1_tensor, + communicator_tensor, nlocal_tensor, nghost_tensor); + } +} + +template +void Border_backward_t(const paddle::Tensor& sendlist_tensor, + const paddle::Tensor& sendproc_tensor, + const paddle::Tensor& recvproc_tensor, + const paddle::Tensor& sendnum_tensor, + const paddle::Tensor& recvnum_tensor, + const paddle::Tensor& g1_tensor, + const paddle::Tensor& communicator_tensor, + const paddle::Tensor& nlocal_tensor, + const paddle::Tensor& nghost_tensor, + paddle::Tensor& recv_g1_tensor_grad) { +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + gpuDeviceSynchronize(); +#endif + paddle::Tensor d_local_g1_tensor = + paddle::empty(recv_g1_tensor_grad.shape(), recv_g1_tensor_grad.dtype(), + recv_g1_tensor_grad.place()); + d_local_g1_tensor.copy_(recv_g1_tensor_grad.contiguous(), + d_local_g1_tensor.place(), true); + +#ifdef USE_MPI + int mpi_init = 0, world_size = 0, me = 0, cuda_aware = 1; + MPI_Initialized(&mpi_init); + + MPI_Comm world; + if (mpi_init) { + unpack_communicator(communicator_tensor, world); + MPI_Comm_rank(world, &me); + MPI_Comm_size(world, &world_size); + } + + auto mpi_type = get_mpi_type(); + MPI_Request request; + +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + if (world_size >= 1) { + int version, subversion; + MPI_Get_version(&version, &subversion); + + if (version >= 4) { +#ifdef NO_CUDA_AWARE + cuda_aware = 0; +#else + cuda_aware = MPIX_Query_cuda_support(); +#endif + } else { + cuda_aware = 0; + } + + if (cuda_aware == 0) { + d_local_g1_tensor = paddle::empty_like( + recv_g1_tensor_grad, recv_g1_tensor_grad.dtype(), paddle::CPUPlace()); + d_local_g1_tensor.copy_(recv_g1_tensor_grad, d_local_g1_tensor.place(), + true); + } + } +#endif +#endif // USE_MPI + int64_t send_list_len = sendlist_tensor.numel(); + paddle::Tensor cpu_sendlist = paddle::empty( + {send_list_len}, paddle::DataType::INT64, paddle::CPUPlace()); + cpu_sendlist.copy_(sendlist_tensor, paddle::CPUPlace(), true); + int64_t* recvlist = cpu_sendlist.data(); + + int nswap = sendproc_tensor.dims()[0]; + // swap send and recv here + paddle::Tensor cpu_recvproc = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_recvproc.copy_(recvproc_tensor, paddle::CPUPlace(), true); + int* recvproc = cpu_recvproc.data(); + + paddle::Tensor cpu_sendproc = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_sendproc.copy_(sendproc_tensor, paddle::CPUPlace(), true); + int* sendproc = cpu_sendproc.data(); + + paddle::Tensor cpu_sendnum = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_sendnum.copy_(sendnum_tensor, paddle::CPUPlace(), true); + int* recvnum = cpu_sendnum.data(); + + paddle::Tensor cpu_recvnum = + paddle::empty({nswap}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_recvnum.copy_(recvnum_tensor, paddle::CPUPlace(), true); + int* sendnum = cpu_recvnum.data(); + + FPTYPE* local_g1 = d_local_g1_tensor.data(); + int tensor_size = d_local_g1_tensor.dims()[1]; + + paddle::Tensor cpu_nlocal = + paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_nlocal.copy_(nlocal_tensor, paddle::CPUPlace(), true); + int nlocal = *cpu_nlocal.data(); + + paddle::Tensor cpu_nghost = + 
paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); + cpu_nghost.copy_(nghost_tensor, paddle::CPUPlace(), true); + int nghost = *cpu_nghost.data(); + int ntotal = nlocal + nghost; + + paddle::Tensor send_g1_tensor, recv_g1_tensor; + FPTYPE *recv_g1 = nullptr, *send_g1 = nullptr; + + if (nswap != 0) { + send_g1_tensor = d_local_g1_tensor; + + int max_recvnum = + *(paddle::experimental::max(cpu_sendnum, {}, false).data()); + recv_g1_tensor = + paddle::empty({max_recvnum, tensor_size}, d_local_g1_tensor.dtype(), + d_local_g1_tensor.place()); + recv_g1 = recv_g1_tensor.data(); + send_g1 = send_g1_tensor.data() + ntotal * tensor_size; + } + + for (int iswap = nswap - 1; iswap >= 0; --iswap) { + int nrecv = recvnum[iswap]; + int nsend = sendnum[iswap]; + + paddle::Tensor irecvlist; + if (nrecv) { + std::intptr_t addr = static_cast(recvlist[iswap]); + int* irecvlist_ptr = reinterpret_cast(addr); + irecvlist = + paddle::from_blob(irecvlist_ptr, {nrecv}, paddle::DataType::INT32, + paddle::DataLayout::NCHW, paddle::CPUPlace()) + .copy_to(d_local_g1_tensor.place(), true); + } + + if (nsend) { + send_g1 -= nsend * tensor_size; + } + +#ifdef USE_MPI + if (sendproc[iswap] != me) { + if (nrecv) { + MPI_Irecv(recv_g1, nrecv * tensor_size, mpi_type, recvproc[iswap], 0, + world, &request); + } + if (nsend) { + MPI_Send(send_g1, nsend * tensor_size, mpi_type, sendproc[iswap], 0, + world); + } + if (nrecv) { + MPI_Wait(&request, MPI_STATUS_IGNORE); + } + } else { +#endif + if (nrecv) { +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) +#ifdef USE_MPI + if (cuda_aware == 0) { + memcpy(recv_g1, send_g1, + (unsigned long)nrecv * tensor_size * sizeof(FPTYPE)); + } else { + gpuMemcpy(recv_g1, send_g1, + (unsigned long)nrecv * tensor_size * sizeof(FPTYPE), + gpuMemcpyDeviceToDevice); + } +#else + gpuMemcpy(recv_g1, send_g1, + (unsigned long)nrecv * tensor_size * sizeof(FPTYPE), + gpuMemcpyDeviceToDevice); +#endif +#else + memcpy(recv_g1, send_g1, + (unsigned long)nrecv * tensor_size * sizeof(FPTYPE)); +#endif + } +#ifdef USE_MPI + } +#endif + if (nrecv) { + d_local_g1_tensor = paddle::experimental::index_add_( + d_local_g1_tensor, irecvlist, recv_g1_tensor.slice(0, nrecv), 0); + } + } +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + gpuDeviceSynchronize(); +#endif + +#ifdef USE_MPI +#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) + if (cuda_aware == 0) { + recv_g1_tensor_grad.copy_(d_local_g1_tensor, recv_g1_tensor_grad.place(), + true); + } +#endif +#endif +} + +void Border_backward(const paddle::Tensor& sendlist_tensor, + const paddle::Tensor& sendproc_tensor, + const paddle::Tensor& recvproc_tensor, + const paddle::Tensor& sendnum_tensor, + const paddle::Tensor& recvnum_tensor, + const paddle::Tensor& g1_tensor, + const paddle::Tensor& communicator_tensor, + const paddle::Tensor& nlocal_tensor, + const paddle::Tensor& nghost_tensor, + paddle::Tensor& recv_g1_tensor_grad) { + bool type_flag = + (recv_g1_tensor_grad.dtype() == paddle::DataType::FLOAT64) ? 
true : false; + if (type_flag) { + Border_backward_t<double>(sendlist_tensor, sendproc_tensor, recvproc_tensor, + sendnum_tensor, recvnum_tensor, g1_tensor, + communicator_tensor, nlocal_tensor, nghost_tensor, + recv_g1_tensor_grad); + } else { + Border_backward_t<float>(sendlist_tensor, sendproc_tensor, recvproc_tensor, + sendnum_tensor, recvnum_tensor, g1_tensor, + communicator_tensor, nlocal_tensor, nghost_tensor, + recv_g1_tensor_grad); + } +} + +/** + * @brief communicate the latest g1_tensor info to the other LAMMPS processes + * @param[in] sendlist_tensor list of atoms to send in each swap + * @param[in] sendproc_tensor proc to send to in each swap + * @param[in] recvproc_tensor proc to recv from in each swap + * @param[in] sendnum_tensor # of atoms to send in each swap + * @param[in] recvnum_tensor # of atoms to recv in each swap + * @param[in] g1_tensor tensor that stores the g1 info to be communicated + * @param[in] communicator_tensor MPI communicator data from LAMMPS + * @param[in] nlocal_tensor # of local atoms + * @param[in] nghost_tensor # of ghost atoms + * @param[out] recv_g1_tensor g1_tensor after communication + **/ +PD_BUILD_OP(border_op) + .Inputs({"sendlist_tensor", "sendproc_tensor", "recvproc_tensor", + "sendnum_tensor", "recvnum_tensor", "g1_tensor", + "communicator_tensor", "nlocal_tensor", "nghost_tensor"}) + .Outputs({"recv_g1_tensor"}) + .SetKernelFn(PD_KERNEL(Border_forward)) + .SetInplaceMap({{"g1_tensor", "recv_g1_tensor"}}); + +PD_BUILD_GRAD_OP(border_op) + .Inputs({"sendlist_tensor", "sendproc_tensor", "recvproc_tensor", + "sendnum_tensor", "recvnum_tensor", "g1_tensor", + "communicator_tensor", "nlocal_tensor", "nghost_tensor", + paddle::Grad("recv_g1_tensor")}) + .Outputs({paddle::Grad("g1_tensor")}) + .SetInplaceMap({{paddle::Grad("recv_g1_tensor"), + paddle::Grad("g1_tensor")}}) + .SetKernelFn(PD_KERNEL(Border_backward));
diff --git a/source/op/pd/setup.py b/source/op/pd/setup.py new file mode 100644 index 0000000000..951b50de9c --- /dev/null +++ b/source/op/pd/setup.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os + + +def main(): + current_dir = os.path.abspath(os.getcwd()) + script_dir = os.path.abspath(os.path.dirname(__file__)) + + if current_dir != script_dir: + raise RuntimeError( + f"[ERROR] Please run this script under directory: `{script_dir}`" + ) + + from paddle.utils.cpp_extension import ( + CppExtension, + setup, + ) + + setup(name="deepmd_op_pd", ext_modules=CppExtension(sources=["comm.cc"])) + + +if __name__ == "__main__": + main()
diff --git a/source/tests/pd/model/test_model.py b/source/tests/pd/model/test_model.py index ce91fd3f21..fa62c28922 100644 --- a/source/tests/pd/model/test_model.py +++ b/source/tests/pd/model/test_model.py @@ -400,7 +400,7 @@ def test_consistency(self) -> None: .detach() .numpy(), ) - self.assertIsNone(model_predict_1.get("atom_virial", None)) + # self.assertIsNone(model_predict_1.get("atom_virial", None)) np.testing.assert_allclose( head_dict["atom_virial"], p_atomic_virial.reshape(head_dict["atom_virial"].shape)
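To make the semantics of the `border_op` registered above more concrete, here is a minimal single-process sketch in NumPy (toy names and shapes; it mirrors only the local self-send copy path of `Border_forward_t`, ignoring MPI, the GPU/CUDA-aware branches, and the backward pass): for each swap, the rows of `g1` listed in the send list are gathered and written into the ghost region that starts at `nlocal`, which is what the `recv_g1 = ... + nlocal * tensor_size` bookkeeping does in the kernel.

import numpy as np


def toy_border_forward(g1, sendlists, sendnum, recvnum, nlocal):
    # g1: (nlocal + nghost, tensor_size) feature rows; ghost rows start at nlocal
    offset = nlocal
    for iswap, idx in enumerate(sendlists):
        nsend, nrecv = sendnum[iswap], recvnum[iswap]
        # single-process case: the "received" rows are just the gathered sent rows
        g1[offset : offset + nrecv] = g1[np.asarray(idx[:nsend])]
        offset += nrecv
    return g1


g1 = np.arange(12, dtype=np.float64).reshape(6, 2)  # 3 local + 3 ghost rows
toy_border_forward(g1, sendlists=[[0, 2, 1]], sendnum=[3], recvnum=[3], nlocal=3)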