deepmd/dpmodel/loss/ener.py (15 changes: 12 additions & 3 deletions)

@@ -71,6 +71,9 @@ class EnergyLoss(Loss):
         The prefactor of generalized force loss at the end of the training.
     numb_generalized_coord : int
         The dimension of generalized coordinates.
+    use_default_pf : bool
+        If true, use default atom_pref of 1.0 for all atoms when atom_pref data is not provided.
+        This allows using the prefactor force loss (pf) without requiring atom_pref.npy files.
     use_huber : bool
         Enables Huber loss calculation for energy/force/virial terms with user-defined threshold delta (D).
         The loss function smoothly transitions between L2 and L1 loss:
@@ -124,6 +127,7 @@ def __init__(
         huber_delta: float | list[float] = 0.01,
         loss_func: str = "mse",
         f_use_norm: bool = False,
+        use_default_pf: bool = False,
         intensive_ener_virial: bool = False,
         **kwargs: Any,
     ) -> None:
@@ -164,6 +168,7 @@ def __init__(
         self.use_huber = use_huber
         self.huber_delta = huber_delta
         self.f_use_norm = f_use_norm
+        self.use_default_pf = use_default_pf
         self.intensive_ener_virial = intensive_ener_virial
         if self.f_use_norm and not (self.use_huber or self.loss_func == "mae"):
             raise RuntimeError(
@@ -203,7 +208,9 @@ def call(
         find_force = label_dict["find_force"]
         find_virial = label_dict["find_virial"]
         find_atom_ener = label_dict["find_atom_ener"]
-        find_atom_pref = label_dict["find_atom_pref"]
+        find_atom_pref = (
+            label_dict["find_atom_pref"] if not self.use_default_pf else 1.0
+        )
         xp = array_api_compat.array_namespace(
             energy,
             force,
@@ -504,6 +511,7 @@ def label_requirement(self) -> list[DataRequirementItem]:
                     must=False,
                     high_prec=False,
                     repeat=3,
+                    default=1.0,
                 )
             )
         if self.has_gf > 0:
@@ -539,7 +547,7 @@ def serialize(self) -> dict:
         """
         return {
             "@class": "EnergyLoss",
-            "@version": 3,
+            "@version": 4,
             "starter_learning_rate": self.starter_learning_rate,
             "start_pref_e": self.start_pref_e,
             "limit_pref_e": self.limit_pref_e,
@@ -560,6 +568,7 @@ def serialize(self) -> dict:
             "huber_delta": self.huber_delta,
             "loss_func": self.loss_func,
             "f_use_norm": self.f_use_norm,
+            "use_default_pf": self.use_default_pf,
             "intensive_ener_virial": self.intensive_ener_virial,
         }
 
@@ -579,7 +588,7 @@ def deserialize(cls, data: dict) -> "Loss":
         """
         data = data.copy()
         version = data.pop("@version")
-        check_version_compatibility(version, 3, 1)
+        check_version_compatibility(version, 4, 1)
         data.pop("@class")
         # Backward compatibility: version 1-2 used legacy normalization
         if version < 3:
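
For reference, a minimal sketch of the gating these hunks implement, using a plain dict and `dict.get` in place of the real `label_dict` indexing (the helper name is illustrative, not part of the codebase):

```python
# Illustrative helper mirroring the branch added in EnergyLoss.call above.
# "find_atom_pref" is the found-data flag: 0.0 when atom_pref.npy is absent,
# 1.0 when it was loaded from the training data.
def resolve_find_atom_pref(label_dict: dict, use_default_pf: bool) -> float:
    if use_default_pf:
        # Force the pf term on; missing atom_pref values are filled with the
        # default of 1.0 supplied by DataRequirementItem(default=1.0).
        return 1.0
    # Previous behavior: with no atom_pref.npy the flag stays 0.0,
    # silently zeroing out the prefactor force loss.
    return label_dict.get("find_atom_pref", 0.0)

assert resolve_find_atom_pref({}, use_default_pf=False) == 0.0  # pf term disabled
assert resolve_find_atom_pref({}, use_default_pf=True) == 1.0   # pf term active
assert resolve_find_atom_pref({"find_atom_pref": 1.0}, use_default_pf=False) == 1.0
```
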
deepmd/pd/loss/ener.py (9 changes: 7 additions & 2 deletions)

@@ -138,6 +138,10 @@ def __init__(
             raise NotImplementedError(
                 "Paddle backend does not support f_use_norm=True."
             )
+        if kwargs.get("use_default_pf", False):
+            raise NotImplementedError(
+                "Paddle backend does not support use_default_pf=True."
+            )
 
         self.starter_learning_rate = starter_learning_rate
         self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference
@@ -577,7 +581,7 @@ def serialize(self) -> dict:
         """
         return {
             "@class": "EnergyLoss",
-            "@version": 3,
+            "@version": 4,
             "starter_learning_rate": self.starter_learning_rate,
             "start_pref_e": self.start_pref_e,
             "limit_pref_e": self.limit_pref_e,
@@ -598,6 +602,7 @@ def serialize(self) -> dict:
             "huber_delta": self.huber_delta,
             "loss_func": self.loss_func,
             "f_use_norm": self.f_use_norm,
+            "use_default_pf": getattr(self, "use_default_pf", False),
             "intensive_ener_virial": self.intensive_ener_virial,
         }
 
@@ -617,7 +622,7 @@ def deserialize(cls, data: dict) -> "TaskLoss":
         """
         data = data.copy()
         version = data.pop("@version")
-        check_version_compatibility(version, 3, 1)
+        check_version_compatibility(version, 4, 1)
         data.pop("@class")
         # Handle backward compatibility for older versions without intensive_ener_virial
         if version < 3:
deepmd/pt/loss/ener.py (17 changes: 13 additions & 4 deletions)

@@ -59,6 +59,7 @@ def __init__(
         loss_func: str = "mse",
         inference: bool = False,
         use_huber: bool = False,
+        use_default_pf: bool = False,
         f_use_norm: bool = False,
         huber_delta: float | list[float] = 0.01,
         intensive_ener_virial: bool = False,
@@ -107,6 +108,9 @@ def __init__(
             MAE loss is less sensitive to outliers compared to MSE loss.
         inference : bool
             If true, it will output all losses found in output, ignoring the pre-factors.
+        use_default_pf : bool
+            If true, use default atom_pref of 1.0 for all atoms when atom_pref data is not provided.
+            This allows using the prefactor force loss (pf) without requiring atom_pref.npy files.
         use_huber : bool
             Enables Huber loss calculation for energy/force/virial terms with user-defined threshold delta (D).
             The loss function smoothly transitions between L2 and L1 loss:
@@ -161,6 +165,7 @@ def __init__(
         self.limit_pref_pf = limit_pref_pf
         self.start_pref_gf = start_pref_gf
         self.limit_pref_gf = limit_pref_gf
+        self.use_default_pf = use_default_pf
         self.relative_f = relative_f
         self.enable_atom_ener_coeff = enable_atom_ener_coeff
         self.numb_generalized_coord = numb_generalized_coord
@@ -381,7 +386,9 @@ def forward(
 
         if self.has_pf and "atom_pref" in label:
             atom_pref = label["atom_pref"]
-            find_atom_pref = label.get("find_atom_pref", 0.0)
+            find_atom_pref = (
+                label.get("find_atom_pref", 0.0) if not self.use_default_pf else 1.0
+            )
             pref_pf = pref_pf * find_atom_pref
             atom_pref_reshape = atom_pref.reshape(-1)
 
@@ -538,7 +545,7 @@ def label_requirement(self) -> list[DataRequirementItem]:
                     high_prec=True,
                 )
             )
-        if self.has_f:
+        if self.has_f or self.has_pf or self.relative_f is not None or self.has_gf:
             label_requirement.append(
                 DataRequirementItem(
                     "force",
@@ -577,6 +584,7 @@ def label_requirement(self) -> list[DataRequirementItem]:
                     must=False,
                     high_prec=False,
                     repeat=3,
+                    default=1.0,
                 )
             )
         if self.has_gf > 0:
@@ -612,7 +620,7 @@ def serialize(self) -> dict:
         """
         return {
             "@class": "EnergyLoss",
-            "@version": 3,
+            "@version": 4,
             "starter_learning_rate": self.starter_learning_rate,
             "start_pref_e": self.start_pref_e,
             "limit_pref_e": self.limit_pref_e,
@@ -633,6 +641,7 @@ def serialize(self) -> dict:
             "huber_delta": self.huber_delta,
             "loss_func": self.loss_func,
             "f_use_norm": self.f_use_norm,
+            "use_default_pf": self.use_default_pf,
             "intensive_ener_virial": self.intensive_ener_virial,
         }
 
@@ -652,7 +661,7 @@ def deserialize(cls, data: dict) -> "TaskLoss":
         """
         data = data.copy()
         version = data.pop("@version")
-        check_version_compatibility(version, 3, 1)
+        check_version_compatibility(version, 4, 1)
         data.pop("@class")
         # Handle backward compatibility for older versions without intensive_ener_virial
         if version < 3:
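
The `default=1.0` added to the `atom_pref` DataRequirementItem is what keeps the loss well-defined when the file is missing. A hedged sketch of what that implies for the data loader (the function and shapes here are illustrative, not the real loader API):

```python
import numpy as np

# Illustrative stand-in for loading an optional atomic quantity with a
# declared default; not the real DeepMD-kit loader.
def load_atom_pref(data: np.ndarray | None, nframes: int, natoms: int) -> np.ndarray:
    if data is not None:
        # atom_pref.npy present: use it as-is, regardless of use_default_pf.
        return data.reshape(nframes, natoms)
    # Absent: fill with the declared default of 1.0 for every atom.
    return np.full((nframes, natoms), 1.0)
```
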
deepmd/tf/loss/ener.py (9 changes: 7 additions & 2 deletions)

@@ -147,6 +147,10 @@ def __init__(
             raise NotImplementedError(
                 "TensorFlow backend does not support f_use_norm=True."
             )
+        if kwargs.get("use_default_pf", False):
+            raise NotImplementedError(
+                "TensorFlow backend does not support use_default_pf=True."
+            )
 
         self.starter_learning_rate = starter_learning_rate
         self.start_pref_e = start_pref_e
@@ -557,7 +561,7 @@ def serialize(self, suffix: str = "") -> dict:
         """
         return {
             "@class": "EnergyLoss",
-            "@version": 3,
+            "@version": 4,
             "starter_learning_rate": self.starter_learning_rate,
             "start_pref_e": self.start_pref_e,
             "limit_pref_e": self.limit_pref_e,
@@ -578,6 +582,7 @@ def serialize(self, suffix: str = "") -> dict:
             "huber_delta": self.huber_delta,
             "loss_func": self.loss_func,
             "f_use_norm": self.f_use_norm,
+            "use_default_pf": getattr(self, "use_default_pf", False),
             "intensive_ener_virial": self.intensive_ener_virial,
         }
 
@@ -599,7 +604,7 @@ def deserialize(cls, data: dict, suffix: str = "") -> "Loss":
         """
         data = data.copy()
         version = data.pop("@version")
-        check_version_compatibility(version, 3, 1)
+        check_version_compatibility(version, 4, 1)
         data.pop("@class")
         # Handle backward compatibility for older versions without intensive_ener_virial
         if version < 3:
deepmd/utils/argcheck.py (14 changes: 14 additions & 0 deletions)

@@ -3217,6 +3217,13 @@ def loss_ener() -> list[Argument]:
         "atomic prefactor force", label="atom_pref", abbr="pf"
     )
     doc_limit_pref_pf = limit_pref("atomic prefactor force")
+    doc_use_default_pf = (
+        "If true, use default atom_pref of 1.0 for all atoms when atom_pref data is not provided. "
+        "This allows using the prefactor force loss (pf) without requiring atom_pref.npy files in training data. "
+        "When atom_pref.npy is provided, it will be used as-is regardless of this setting. "
+        "Note: this option is only effective for the PyTorch/DPModel backends; "
+        "the TensorFlow and Paddle backends raise NotImplementedError when set to true."
+    )
     doc_start_pref_gf = start_pref("generalized force", label="drdq", abbr="gf")
     doc_limit_pref_gf = limit_pref("generalized force")
     doc_numb_generalized_coord = "The dimension of generalized coordinates. Required when generalized force loss is used."
@@ -3339,6 +3346,13 @@ def loss_ener() -> list[Argument]:
             default=0.00,
             doc=doc_limit_pref_pf,
         ),
+        Argument(
+            "use_default_pf",
+            bool,
+            optional=True,
+            default=False,
+            doc=doc_use_default_pf,
+        ),
        Argument("relative_f", [float, None], optional=True, doc=doc_relative_f),
        Argument(
            "enable_atom_ener_coeff",
doc/model/train-se-a-mask.md (16 changes: 16 additions & 0 deletions)

@@ -85,6 +85,22 @@ And the `loss` section in the training input script should be set as follows.
 }
 ```
 
+If `atom_pref.npy` is not provided in the training data, one can set `use_default_pf` to `true` to use a default atom preference of 1.0 for all atoms. This allows using the prefactor force loss (`pf` loss) without requiring `atom_pref.npy` files. When `atom_pref.npy` is provided, it will be used as-is regardless of this setting.
+
+```json
+"loss": {
+    "type": "ener",
+    "start_pref_e": 0.0,
+    "limit_pref_e": 0.0,
+    "start_pref_f": 0.0,
+    "limit_pref_f": 0.0,
+    "start_pref_pf": 1.0,
+    "limit_pref_pf": 1.0,
+    "use_default_pf": true,
+    "_comment": " that's all"
+}
+```
+
 ## Type embedding
 
 Same as [`se_e2_a`](./train-se-e2-a.md).
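
For data sets where per-atom weighting does matter, a hedged sketch of producing an explicit `atom_pref.npy` instead of relying on `use_default_pf` (shape and values are illustrative; see the data-format documentation for the authoritative convention):

```python
import numpy as np

nframes, natoms = 10, 64  # illustrative system size
atom_pref = np.ones((nframes, natoms), dtype=np.float64)
atom_pref[:, :8] = 10.0  # e.g. upweight the force errors on the first 8 atoms
np.save("set.000/atom_pref.npy", atom_pref)
```
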