231 changes: 171 additions & 60 deletions deepmd/pt/loss/property.py
@@ -23,8 +23,13 @@
class PropertyLoss(TaskLoss):
def __init__(
self,
starter_learning_rate: float,
task_dim,
var_name: str,
start_pref_property: float = 1.00,
limit_pref_property: float = 1.00,
start_pref_aproperty: float = 0.0,
limit_pref_aproperty: float = 0.0,
loss_func: str = "smooth_mae",
metric: list = ["mae"],
beta: float = 1.00,
@@ -57,6 +62,11 @@ def __init__(
Whether the property is intensive.
"""
super().__init__()
self.starter_learning_rate = starter_learning_rate
self.start_pref_property = start_pref_property
self.limit_pref_property = limit_pref_property
self.start_pref_aproperty = start_pref_aproperty
self.limit_pref_aproperty = limit_pref_aproperty
self.task_dim = task_dim
self.loss_func = loss_func
self.metric = metric
@@ -66,6 +76,20 @@ def __init__(
self.intensive = intensive
self.var_name = var_name

assert (
    self.start_pref_property >= 0.0
    and self.limit_pref_property >= 0.0
    and self.start_pref_aproperty >= 0.0
    and self.limit_pref_aproperty >= 0.0
), "Cannot assign a negative weight to `pref` or `pref_atomic`"

self.has_property = start_pref_property != 0.0 and limit_pref_property != 0.0
self.has_aproperty = start_pref_aproperty != 0.0 and limit_pref_aproperty != 0.0

assert self.has_property or self.has_aproperty, (
    "Cannot assign zero weights to both `pref` and `pref_atomic`"
)

def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False):
"""Return loss on properties .

@@ -98,6 +122,16 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False
model_pred[var_name] = model_pred[var_name] / natoms
label[var_name] = label[var_name] / natoms

coef = learning_rate / self.starter_learning_rate
pref_property = (
self.limit_pref_property
+ (self.start_pref_property - self.limit_pref_property) * coef
)
pref_aproperty = (
self.limit_pref_aproperty
+ (self.start_pref_aproperty - self.limit_pref_aproperty) * coef
)
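# Note: the prefactors above follow the linear-in-learning-rate schedule
# used by the other DeePMD-kit losses:
#     pref(lr) = limit_pref + (start_pref - limit_pref) * lr / start_lr,
# so each weight starts at start_pref and moves toward limit_pref as the
# learning rate decays.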

if self.out_std is None:
out_std = model.atomic_model.out_std[0][0]
else:
@@ -123,79 +157,156 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False
loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0]
more_loss = {}

if (
    self.has_property
    and self.var_name in model_pred
    and self.var_name in label
):
    # loss
    if self.loss_func == "smooth_mae":
        loss += pref_property * F.smooth_l1_loss(
            (label[var_name] - out_bias) / out_std,
            (model_pred[var_name] - out_bias) / out_std,
            reduction="sum",
            beta=self.beta,
        )
    elif self.loss_func == "mae":
        loss += pref_property * F.l1_loss(
            (label[var_name] - out_bias) / out_std,
            (model_pred[var_name] - out_bias) / out_std,
            reduction="sum",
        )
    elif self.loss_func == "mse":
        loss += pref_property * F.mse_loss(
            (label[var_name] - out_bias) / out_std,
            (model_pred[var_name] - out_bias) / out_std,
            reduction="sum",
        )
    elif self.loss_func == "rmse":
        loss += pref_property * torch.sqrt(
            F.mse_loss(
                (label[var_name] - out_bias) / out_std,
                (model_pred[var_name] - out_bias) / out_std,
                reduction="mean",
            )
        )
    else:
        raise RuntimeError(f"Unknown loss function : {self.loss_func}")

    # more loss
    if "smooth_mae" in self.metric:
        more_loss["smooth_mae"] = F.smooth_l1_loss(
            label[var_name],
            model_pred[var_name],
            reduction="mean",
            beta=self.beta,
        ).detach()
    if "mae" in self.metric:
        more_loss["mae"] = F.l1_loss(
            label[var_name],
            model_pred[var_name],
            reduction="mean",
        ).detach()
    if "mse" in self.metric:
        more_loss["mse"] = F.mse_loss(
            label[var_name],
            model_pred[var_name],
            reduction="mean",
        ).detach()
    if "rmse" in self.metric:
        more_loss["rmse"] = torch.sqrt(
            F.mse_loss(
                label[var_name],
                model_pred[var_name],
                reduction="mean",
            )
        ).detach()

if (
    self.has_aproperty
    and f"atom_{self.var_name}" in model_pred
    and f"atom_{self.var_name}" in label
):
    # loss
    if self.loss_func == "smooth_mae":
        loss += pref_aproperty * F.smooth_l1_loss(
            (label[f"atom_{var_name}"] - out_bias) / out_std,
            (model_pred[f"atom_{var_name}"] - out_bias) / out_std,
            reduction="sum",
            beta=self.beta,
        )
    elif self.loss_func == "mae":
        loss += pref_aproperty * F.l1_loss(
            (label[f"atom_{var_name}"] - out_bias) / out_std,
            (model_pred[f"atom_{var_name}"] - out_bias) / out_std,
            reduction="sum",
        )
    elif self.loss_func == "mse":
        loss += pref_aproperty * F.mse_loss(
            (label[f"atom_{var_name}"] - out_bias) / out_std,
            (model_pred[f"atom_{var_name}"] - out_bias) / out_std,
            reduction="sum",
        )
    elif self.loss_func == "rmse":
        loss += pref_aproperty * torch.sqrt(
            F.mse_loss(
                (label[f"atom_{var_name}"] - out_bias) / out_std,
                (model_pred[f"atom_{var_name}"] - out_bias) / out_std,
                reduction="mean",
            )
        )
    else:
        raise RuntimeError(f"Unknown loss function : {self.loss_func}")

    # more loss
    if "smooth_mae" in self.metric:
        more_loss["smooth_mae_atom"] = F.smooth_l1_loss(
            label[f"atom_{var_name}"],
            model_pred[f"atom_{var_name}"],
            reduction="mean",
            beta=self.beta,
        ).detach()
    if "mae" in self.metric:
        more_loss["mae_atom"] = F.l1_loss(
            label[f"atom_{var_name}"],
            model_pred[f"atom_{var_name}"],
            reduction="mean",
        ).detach()
    if "mse" in self.metric:
        more_loss["mse_atom"] = F.mse_loss(
            label[f"atom_{var_name}"],
            model_pred[f"atom_{var_name}"],
            reduction="mean",
        ).detach()
    if "rmse" in self.metric:
        more_loss["rmse_atom"] = torch.sqrt(
            F.mse_loss(
                label[f"atom_{var_name}"],
                model_pred[f"atom_{var_name}"],
                reduction="mean",
            )
        ).detach()

return model_pred, loss, more_loss

@property
def label_requirement(self) -> list[DataRequirementItem]:
"""Return data label requirements needed for this loss calculation."""
label_requirement = []
if self.has_aproperty:
    label_requirement.append(
        DataRequirementItem(
            f"atom_{self.var_name}",
            ndof=self.task_dim,
            atomic=True,
            must=False,
            high_prec=True,
        )
    )
if self.has_property:
    label_requirement.append(
        DataRequirementItem(
            self.var_name,
            ndof=self.task_dim,
            atomic=False,
            must=False,
            high_prec=True,
        )
    )
return label_requirement
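
For intuition, here is a minimal sketch of the prefactor schedule implemented in forward() above; the helper name and the numeric values are illustrative assumptions, not part of this diff:

def pref(lr: float, start_lr: float, start_pref: float, limit_pref: float) -> float:
    # Same interpolation as PropertyLoss.forward(): linear in the current
    # learning rate, moving from start_pref toward limit_pref as lr decays.
    coef = lr / start_lr
    return limit_pref + (start_pref - limit_pref) * coef


# With start_pref=1.0, limit_pref=0.1 and the lr decayed from 1e-3 to 1e-4,
# the property term is weighted by 0.1 + 0.9 * 0.1 = 0.19.
print(pref(1e-4, 1e-3, 1.0, 0.1))  # ≈ 0.19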
1 change: 1 addition & 0 deletions deepmd/pt/train/training.py
@@ -1266,6 +1266,7 @@ def get_loss(loss_params, start_lr, _ntypes, _model):
loss_params["task_dim"] = task_dim
loss_params["var_name"] = var_name
loss_params["intensive"] = intensive
loss_params["starter_learning_rate"] = start_lr
return PropertyLoss(**loss_params)
else:
loss_params["starter_learning_rate"] = start_lr
32 changes: 32 additions & 0 deletions deepmd/utils/argcheck.py
@@ -2742,6 +2742,10 @@ def loss_property():
doc_loss_func = "The loss function to minimize, such as 'mae', 'smooth_mae'."
doc_metric = "The metric for display. This list can include 'smooth_mae', 'mae', 'mse' and 'rmse'."
doc_beta = "The 'beta' parameter in 'smooth_mae' loss."
doc_start_pref_property = start_pref("property")
doc_limit_pref_property = limit_pref("property")
doc_start_pref_aproperty = start_pref("atomic property")
doc_limit_pref_aproperty = limit_pref("atomic property")
return [
Argument(
"loss_func",
@@ -2764,6 +2768,34 @@
default=1.00,
doc=doc_beta,
),
Argument(
"start_pref_property",
[float, int],
optional=True,
default=1.00,
doc=doc_start_pref_property,
),
Argument(
"limit_pref_property",
[float, int],
optional=True,
default=1.00,
doc=doc_limit_pref_property,
),
Argument(
"start_pref_aproperty",
[float, int],
optional=True,
default=0.00,
doc=doc_start_pref_aproperty,
),
Argument(
"limit_pref_aproperty",
[float, int],
optional=True,
default=0.00,
doc=doc_limit_pref_aproperty,
),
]
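
As a usage sketch, a property loss section exercising the new arguments could look like this (Python dict form of the JSON input; the keys mirror the arguments registered above, and the values are illustrative assumptions, not recommended settings):

# Hypothetical "loss" section for a property model; values are made up
# purely for illustration.
loss_config = {
    "type": "property",
    "loss_func": "smooth_mae",
    "metric": ["mae"],
    "beta": 1.00,
    "start_pref_property": 1.0,
    "limit_pref_property": 1.0,
    "start_pref_aproperty": 0.1,
    "limit_pref_aproperty": 0.1,
}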

