diff --git a/source/api_cc/tests/test_deeppot_model_devi_ptexpt.cc b/source/api_cc/tests/test_deeppot_model_devi_ptexpt.cc
new file mode 100644
index 0000000000..7b5a3bbd9e
--- /dev/null
+++ b/source/api_cc/tests/test_deeppot_model_devi_ptexpt.cc
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+// Test C++ DeepPotModelDevi inference for the pt_expt (.pt2) backend.
+// Uses two SE(A) models with fparam/aparam + default_fparam and different
+// seeds.
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cmath>
+#include <fstream>
+#include <vector>
+
+#include "DeepPot.h"
+#include "neighbor_list.h"
+#include "test_utils.h"
+
+// 1e-10 cannot pass; same tolerance as the fparam_aparam ptexpt tests.
+#undef EPSILON
+#define EPSILON (std::is_same<VALUETYPE, double>::value ? 1e-7 : 1e-4)
+
+// ---------------------------------------------------------------------------
+// Test class 1: individual vs model_devi consistency
+// ---------------------------------------------------------------------------
+template <class VALUETYPE>
+class TestInferDeepPotModeDeviPtExpt : public ::testing::Test {
+ protected:
+  std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
+                                  00.25, 3.32, 1.68, 3.36,  3.00, 1.81,
+                                  3.51,  2.51, 2.60, 4.27,  3.22, 1.56};
+  std::vector<int> atype = {0, 0, 0, 0, 0, 0};
+  std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
+  std::vector<VALUETYPE> fparam = {0.25852028};
+  std::vector<VALUETYPE> aparam = {0.25852028, 0.25852028, 0.25852028,
+                                   0.25852028, 0.25852028, 0.25852028};
+  int natoms;
+
+  deepmd::DeepPot dp0;
+  deepmd::DeepPot dp1;
+  deepmd::DeepPotModelDevi dp_md;
+
+  void SetUp() override {
+#ifndef BUILD_PYTORCH
+    GTEST_SKIP() << "Skip because PyTorch support is not enabled.";
+#endif
+    dp0.init("../../tests/infer/model_devi_md0.pt2");
+    dp1.init("../../tests/infer/model_devi_md1.pt2");
+    dp_md.init(
+        std::vector<std::string>({"../../tests/infer/model_devi_md0.pt2",
+                                  "../../tests/infer/model_devi_md1.pt2"}));
+    natoms = coord.size() / 3;
+  };
+
+  void TearDown() override {};
+};
+
+TYPED_TEST_SUITE(TestInferDeepPotModeDeviPtExpt, ValueTypes);
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, attrs) {
+  using VALUETYPE = TypeParam;
+  deepmd::DeepPot& dp0 = this->dp0;
+  deepmd::DeepPot& dp1 = this->dp1;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+  EXPECT_EQ(dp0.cutoff(), dp_md.cutoff());
+  EXPECT_EQ(dp0.numb_types(), dp_md.numb_types());
+  EXPECT_EQ(dp0.dim_fparam(), dp_md.dim_fparam());
+  EXPECT_EQ(dp0.dim_aparam(), dp_md.dim_aparam());
+  EXPECT_EQ(dp1.cutoff(), dp_md.cutoff());
+  EXPECT_EQ(dp1.numb_types(), dp_md.numb_types());
+  EXPECT_EQ(dp1.dim_fparam(), dp_md.dim_fparam());
+  EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam());
+  EXPECT_EQ(dp_md.dim_fparam(), 1);
+  EXPECT_EQ(dp_md.dim_aparam(), 1);
+  EXPECT_TRUE(dp_md.has_default_fparam());
+}
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, cpu_build_nlist) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  deepmd::DeepPot& dp0 = this->dp0;
+  deepmd::DeepPot& dp1 = this->dp1;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+
+  int nmodel = 2;
+  std::vector<double> edir(nmodel), emd;
+  std::vector<std::vector<VALUETYPE> > fdir(nmodel), vdir(nmodel), fmd(nmodel),
+      vmd;
+  dp0.compute(edir[0], fdir[0], vdir[0], coord, atype, box, fparam, aparam);
+  dp1.compute(edir[1], fdir[1], vdir[1], coord, atype, box, fparam, aparam);
+  dp_md.compute(emd, fmd, vmd, coord, atype, box, fparam, aparam);
+
+  EXPECT_EQ(edir.size(), emd.size());
+  EXPECT_EQ(fdir.size(), fmd.size());
+  EXPECT_EQ(vdir.size(), vmd.size());
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_EQ(fdir[kk].size(), fmd[kk].size());
+    EXPECT_EQ(vdir[kk].size(), vmd[kk].size());
+  }
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON);
+    for (size_t ii = 0; ii < fdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < vdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON);
+    }
+  }
+}
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, cpu_build_nlist_atomic) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  deepmd::DeepPot& dp0 = this->dp0;
+  deepmd::DeepPot& dp1 = this->dp1;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+
+  int nmodel = 2;
+  std::vector<double> edir(nmodel), emd;
+  std::vector<std::vector<VALUETYPE> > fdir(nmodel), vdir(nmodel), fmd(nmodel),
+      vmd, aedir(nmodel), aemd, avdir(nmodel), avmd(nmodel);
+  dp0.compute(edir[0], fdir[0], vdir[0], aedir[0], avdir[0], coord, atype, box,
+              fparam, aparam);
+  dp1.compute(edir[1], fdir[1], vdir[1], aedir[1], avdir[1], coord, atype, box,
+              fparam, aparam);
+  dp_md.compute(emd, fmd, vmd, aemd, avmd, coord, atype, box, fparam, aparam);
+
+  EXPECT_EQ(edir.size(), emd.size());
+  EXPECT_EQ(fdir.size(), fmd.size());
+  EXPECT_EQ(vdir.size(), vmd.size());
+  EXPECT_EQ(aedir.size(), aemd.size());
+  EXPECT_EQ(avdir.size(), avmd.size());
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_EQ(fdir[kk].size(), fmd[kk].size());
+    EXPECT_EQ(vdir[kk].size(), vmd[kk].size());
+    EXPECT_EQ(aedir[kk].size(), aemd[kk].size());
+    EXPECT_EQ(avdir[kk].size(), avmd[kk].size());
+  }
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON);
+    for (size_t ii = 0; ii < fdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < vdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < aedir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < avdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON);
+    }
+  }
+}
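+
+// The *_lmp_list tests below evaluate through a LAMMPS-style neighbor list:
+// _build_nlist replicates the periodic cell into ghost atoms (nall >= nloc),
+// and _fold_back sums ghost-atom forces and atomic virials back onto the
+// nloc local atoms via `mapping` before comparing against the ensemble.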
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, cpu_lmp_list) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  deepmd::DeepPot& dp0 = this->dp0;
+  deepmd::DeepPot& dp1 = this->dp1;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+  float rc = dp_md.cutoff();
+  int nloc = coord.size() / 3;
+  std::vector<VALUETYPE> coord_cpy;
+  std::vector<int> atype_cpy, mapping;
+  std::vector<std::vector<int> > nlist_data;
+  _build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
+                          atype, box, rc);
+  int nall = coord_cpy.size() / 3;
+  std::vector<int> ilist(nloc), numneigh(nloc);
+  std::vector<int*> firstneigh(nloc);
+  deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
+  convert_nlist(inlist, nlist_data);
+
+  int nmodel = 2;
+  std::vector<double> edir(nmodel), emd;
+  std::vector<std::vector<VALUETYPE> > fdir_(nmodel), fdir(nmodel),
+      vdir(nmodel), fmd_, fmd(nmodel), vmd;
+  dp0.compute(edir[0], fdir_[0], vdir[0], coord_cpy, atype_cpy, box,
+              nall - nloc, inlist, 0, fparam, aparam);
+  dp1.compute(edir[1], fdir_[1], vdir[1], coord_cpy, atype_cpy, box,
+              nall - nloc, inlist, 0, fparam, aparam);
+  dp_md.compute(emd, fmd_, vmd, coord_cpy, atype_cpy, box, nall - nloc, inlist,
+                0, fparam, aparam);
+  for (int kk = 0; kk < nmodel; ++kk) {
+    _fold_back<VALUETYPE>(fdir[kk], fdir_[kk], mapping, nloc, nall, 3);
+    _fold_back<VALUETYPE>(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
+  }
+
+  EXPECT_EQ(edir.size(), emd.size());
+  EXPECT_EQ(fdir.size(), fmd.size());
+  EXPECT_EQ(vdir.size(), vmd.size());
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_EQ(fdir[kk].size(), fmd[kk].size());
+    EXPECT_EQ(vdir[kk].size(), vmd[kk].size());
+  }
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON);
+    for (size_t ii = 0; ii < fdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < vdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON);
+    }
+  }
+}
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, cpu_lmp_list_atomic) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  deepmd::DeepPot& dp0 = this->dp0;
+  deepmd::DeepPot& dp1 = this->dp1;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+  float rc = dp_md.cutoff();
+  int nloc = coord.size() / 3;
+  std::vector<VALUETYPE> coord_cpy;
+  std::vector<int> atype_cpy, mapping;
+  std::vector<std::vector<int> > nlist_data;
+  _build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
+                          atype, box, rc);
+  int nall = coord_cpy.size() / 3;
+  std::vector<int> ilist(nloc), numneigh(nloc);
+  std::vector<int*> firstneigh(nloc);
+  deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
+  convert_nlist(inlist, nlist_data);
+
+  int nmodel = 2;
+  std::vector<double> edir(nmodel), emd;
+  std::vector<std::vector<VALUETYPE> > fdir_(nmodel), fdir(nmodel),
+      vdir(nmodel), fmd_, fmd(nmodel), vmd, aedir(nmodel), aemd, avdir(nmodel),
+      avdir_(nmodel), avmd(nmodel), avmd_;
+  dp0.compute(edir[0], fdir_[0], vdir[0], aedir[0], avdir_[0], coord_cpy,
+              atype_cpy, box, nall - nloc, inlist, 0, fparam, aparam);
+  dp1.compute(edir[1], fdir_[1], vdir[1], aedir[1], avdir_[1], coord_cpy,
+              atype_cpy, box, nall - nloc, inlist, 0, fparam, aparam);
+  dp_md.compute(emd, fmd_, vmd, aemd, avmd_, coord_cpy, atype_cpy, box,
+                nall - nloc, inlist, 0, fparam, aparam);
+  for (int kk = 0; kk < nmodel; ++kk) {
+    _fold_back<VALUETYPE>(fdir[kk], fdir_[kk], mapping, nloc, nall, 3);
+    _fold_back<VALUETYPE>(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
+    _fold_back<VALUETYPE>(avdir[kk], avdir_[kk], mapping, nloc, nall, 9);
+    _fold_back<VALUETYPE>(avmd[kk], avmd_[kk], mapping, nloc, nall, 9);
+  }
+
+  EXPECT_EQ(edir.size(), emd.size());
+  EXPECT_EQ(fdir.size(), fmd.size());
+  EXPECT_EQ(vdir.size(), vmd.size());
+  EXPECT_EQ(aedir.size(), aemd.size());
+  EXPECT_EQ(avdir.size(), avmd.size());
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_EQ(fdir[kk].size(), fmd[kk].size());
+    EXPECT_EQ(vdir[kk].size(), vmd[kk].size());
+    EXPECT_EQ(aedir[kk].size(), aemd[kk].size());
+    EXPECT_EQ(avdir[kk].size(), avmd[kk].size());
+  }
+  for (int kk = 0; kk < nmodel; ++kk) {
+    EXPECT_LT(fabs(edir[kk] - emd[kk]), EPSILON);
+    for (size_t ii = 0; ii < fdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(fdir[kk][ii] - fmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < vdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < aedir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(aedir[kk][ii] - aemd[kk][ii]), EPSILON);
+    }
+    for (size_t ii = 0; ii < avdir[kk].size(); ++ii) {
+      EXPECT_LT(fabs(avdir[kk][ii] - avmd[kk][ii]), EPSILON);
+    }
+  }
+}
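+
+// cpu_lmp_list_std cross-checks the ensemble-deviation helpers against
+// explicit formulas: for M models,
+//   std_e[i] = sqrt( sum_k (ae_k[i] - <ae[i]>)^2 / M )
+//   std_f[i] = sqrt( sum_k |f_k[i] - <f[i]>|^2 / M )
+// and the relative force deviation divides std_f[i] by (|<f[i]>| + eps).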
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExpt, cpu_lmp_list_std) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+  float rc = dp_md.cutoff();
+  int nloc = coord.size() / 3;
+  std::vector<VALUETYPE> coord_cpy;
+  std::vector<int> atype_cpy, mapping;
+  std::vector<std::vector<int> > nlist_data;
+  _build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
+                          atype, box, rc);
+  int nall = coord_cpy.size() / 3;
+  std::vector<int> ilist(nloc), numneigh(nloc);
+  std::vector<int*> firstneigh(nloc);
+  deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
+  convert_nlist(inlist, nlist_data);
+
+  int nmodel = 2;
+  std::vector<double> emd;
+  std::vector<std::vector<VALUETYPE> > fmd_, fmd(nmodel), vmd;
+  std::vector<std::vector<VALUETYPE> > aemd(nmodel), aemd_, avmd(nmodel),
+      avmd_;
+  dp_md.compute(emd, fmd_, vmd, aemd_, avmd_, coord_cpy, atype_cpy, box,
+                nall - nloc, inlist, 0, fparam, aparam);
+  for (int kk = 0; kk < nmodel; ++kk) {
+    _fold_back<VALUETYPE>(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
+    _fold_back<VALUETYPE>(avmd[kk], avmd_[kk], mapping, nloc, nall, 9);
+    aemd[kk].resize(nloc);
+    for (int ii = 0; ii < nloc; ++ii) {
+      aemd[kk][ii] = aemd_[kk][ii];
+    }
+  }
+
+  // dp compute std e
+  std::vector<VALUETYPE> avg_e, std_e;
+  dp_md.compute_avg(avg_e, aemd);
+  dp_md.compute_std_e(std_e, avg_e, aemd);
+
+  // manually compute std e
+  std::vector<VALUETYPE> manual_avg_e(nloc);
+  std::vector<VALUETYPE> manual_std_e(nloc);
+  for (int ii = 0; ii < nloc; ++ii) {
+    double avg_e(0.0);
+    for (int kk = 0; kk < nmodel; ++kk) {
+      avg_e += aemd[kk][ii];
+    }
+    avg_e /= nmodel;
+    manual_avg_e[ii] = avg_e;
+    double std = 0;
+    for (int kk = 0; kk < nmodel; ++kk) {
+      std += (aemd[kk][ii] - avg_e) * (aemd[kk][ii] - avg_e);
+    }
+    std = sqrt(std / nmodel);
+    manual_std_e[ii] = std;
+  }
+  EXPECT_EQ(manual_std_e.size(), std_e.size());
+  for (size_t ii = 0; ii < std_e.size(); ++ii) {
+    EXPECT_LT(fabs(manual_avg_e[ii] - avg_e[ii]), EPSILON);
+    EXPECT_LT(fabs(manual_std_e[ii] - std_e[ii]), EPSILON);
+  }
+
+  // dp compute std f
+  std::vector<VALUETYPE> avg_f, std_f;
+  dp_md.compute_avg(avg_f, fmd);
+  dp_md.compute_std_f(std_f, avg_f, fmd);
+
+  // manually compute std f
+  std::vector<VALUETYPE> manual_std_f(nloc);
+  std::vector<VALUETYPE> manual_rel_std_f(nloc);
+  VALUETYPE eps = 0.2;
+  EXPECT_EQ(fmd[0].size(), static_cast<size_t>(nloc * 3));
+  for (int ii = 0; ii < nloc; ++ii) {
+    std::vector<VALUETYPE> avg_f(3, 0.0);
+    for (int dd = 0; dd < 3; ++dd) {
+      for (int kk = 0; kk < nmodel; ++kk) {
+        avg_f[dd] += fmd[kk][ii * 3 + dd];
+      }
+      avg_f[dd] /= (nmodel) * 1.0;
+    }
+    VALUETYPE std = 0.;
+    for (int kk = 0; kk < nmodel; ++kk) {
+      for (int dd = 0; dd < 3; ++dd) {
+        VALUETYPE tmp = fmd[kk][ii * 3 + dd] - avg_f[dd];
+        std += tmp * tmp;
+      }
+    }
+    VALUETYPE f_norm = 0;
+    for (int dd = 0; dd < 3; ++dd) {
+      f_norm += avg_f[dd] * avg_f[dd];
+    }
+    f_norm = sqrt(f_norm);
+    std /= nmodel * 1.0;
+    manual_std_f[ii] = sqrt(std);
+    manual_rel_std_f[ii] = manual_std_f[ii] / (f_norm + eps);
+  }
+
+  EXPECT_EQ(manual_std_f.size(), std_f.size());
+  for (size_t ii = 0; ii < std_f.size(); ++ii) {
+    EXPECT_LT(fabs(manual_std_f[ii] - std_f[ii]), EPSILON);
+  }
+  dp_md.compute_relative_std_f(std_f, avg_f, eps);
+  EXPECT_EQ(manual_std_f.size(), std_f.size());
+  for (size_t ii = 0; ii < std_f.size(); ++ii) {
+    EXPECT_LT(fabs(manual_rel_std_f[ii] - std_f[ii]), EPSILON);
+  }
+}
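+
+// The expected_md_f / expected_md_v reference values used below are the ones
+// printed by source/tests/infer/gen_model_devi.py when it exports the two
+// models.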
+
+// ---------------------------------------------------------------------------
+// Test class 2: precomputed reference values
+// ---------------------------------------------------------------------------
+template <class VALUETYPE>
+class TestInferDeepPotModeDeviPtExptPrecomputed : public ::testing::Test {
+ protected:
+  std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
+                                  00.25, 3.32, 1.68, 3.36,  3.00, 1.81,
+                                  3.51,  2.51, 2.60, 4.27,  3.22, 1.56};
+  std::vector<int> atype = {0, 0, 0, 0, 0, 0};
+  std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
+  std::vector<VALUETYPE> fparam = {0.25852028};
+  std::vector<VALUETYPE> aparam = {0.25852028, 0.25852028, 0.25852028,
+                                   0.25852028, 0.25852028, 0.25852028};
+  int natoms;
+  std::vector<VALUETYPE> expected_md_f = {
+      8.458165365077899029e-04, 5.029083479824372890e-04,
+      6.882240709962303546e-04};  // max min avg
+  std::vector<VALUETYPE> expected_md_v = {
+      2.467581010020606898e-04, 4.340036212959217315e-05,
+      1.286020468466399431e-04};  // max min mystd
+
+  deepmd::DeepPot dp0;
+  deepmd::DeepPot dp1;
+  deepmd::DeepPotModelDevi dp_md;
+
+  void SetUp() override {
+#ifndef BUILD_PYTORCH
+    GTEST_SKIP() << "Skip because PyTorch support is not enabled.";
+#endif
+    dp0.init("../../tests/infer/model_devi_md0.pt2");
+    dp1.init("../../tests/infer/model_devi_md1.pt2");
+    dp_md.init(
+        std::vector<std::string>({"../../tests/infer/model_devi_md0.pt2",
+                                  "../../tests/infer/model_devi_md1.pt2"}));
+    natoms = coord.size() / 3;
+  };
+
+  void TearDown() override {};
+};
+
+TYPED_TEST_SUITE(TestInferDeepPotModeDeviPtExptPrecomputed, ValueTypes);
+
+template <class VALUETYPE>
+inline VALUETYPE mymax_ptexpt(const std::vector<VALUETYPE>& xx) {
+  VALUETYPE ret = 0;
+  for (size_t ii = 0; ii < xx.size(); ++ii) {
+    if (xx[ii] > ret) {
+      ret = xx[ii];
+    }
+  }
+  return ret;
+}
+template <class VALUETYPE>
+inline VALUETYPE mymin_ptexpt(const std::vector<VALUETYPE>& xx) {
+  VALUETYPE ret = 1e10;
+  for (size_t ii = 0; ii < xx.size(); ++ii) {
+    if (xx[ii] < ret) {
+      ret = xx[ii];
+    }
+  }
+  return ret;
+}
+template <class VALUETYPE>
+inline VALUETYPE myavg_ptexpt(const std::vector<VALUETYPE>& xx) {
+  VALUETYPE ret = 0;
+  for (size_t ii = 0; ii < xx.size(); ++ii) {
+    ret += xx[ii];
+  }
+  return (ret / xx.size());
+}
+template <class VALUETYPE>
+inline VALUETYPE mystd_ptexpt(const std::vector<VALUETYPE>& xx) {
+  VALUETYPE ret = 0;
+  for (size_t ii = 0; ii < xx.size(); ++ii) {
+    ret += xx[ii] * xx[ii];
+  }
+  return sqrt(ret / xx.size());
+}
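+
+// The helpers above collapse the per-atom (std_f) and per-component (std_v)
+// deviation vectors into the scalar triples stored in expected_md_f
+// (max/min/avg) and expected_md_v (max/min/RMS).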
+
+TYPED_TEST(TestInferDeepPotModeDeviPtExptPrecomputed, cpu_lmp_list_std) {
+  using VALUETYPE = TypeParam;
+  std::vector<VALUETYPE>& coord = this->coord;
+  std::vector<int>& atype = this->atype;
+  std::vector<VALUETYPE>& box = this->box;
+  std::vector<VALUETYPE>& fparam = this->fparam;
+  std::vector<VALUETYPE>& aparam = this->aparam;
+  std::vector<VALUETYPE>& expected_md_f = this->expected_md_f;
+  std::vector<VALUETYPE>& expected_md_v = this->expected_md_v;
+  deepmd::DeepPotModelDevi& dp_md = this->dp_md;
+  float rc = dp_md.cutoff();
+  int nloc = coord.size() / 3;
+  std::vector<VALUETYPE> coord_cpy;
+  std::vector<int> atype_cpy, mapping;
+  std::vector<std::vector<int> > nlist_data;
+  _build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
+                          atype, box, rc);
+  int nall = coord_cpy.size() / 3;
+  std::vector<int> ilist(nloc), numneigh(nloc);
+  std::vector<int*> firstneigh(nloc);
+  deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
+  convert_nlist(inlist, nlist_data);
+
+  int nmodel = 2;
+  std::vector<double> emd;
+  std::vector<std::vector<VALUETYPE> > fmd_, fmd(nmodel), vmd;
+  std::vector<std::vector<VALUETYPE> > aemd(nmodel), aemd_, avmd(nmodel),
+      avmd_;
+  dp_md.compute(emd, fmd_, vmd, aemd_, avmd_, coord_cpy, atype_cpy, box,
+                nall - nloc, inlist, 0, fparam, aparam);
+  for (int kk = 0; kk < nmodel; ++kk) {
+    _fold_back<VALUETYPE>(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
+    _fold_back<VALUETYPE>(avmd[kk], avmd_[kk], mapping, nloc, nall, 9);
+    aemd[kk].resize(nloc);
+    for (int ii = 0; ii < nloc; ++ii) {
+      aemd[kk][ii] = aemd_[kk][ii];
+    }
+  }
+
+  // dp compute std f
+  std::vector<VALUETYPE> avg_f, std_f;
+  dp_md.compute_avg(avg_f, fmd);
+  dp_md.compute_std_f(std_f, avg_f, fmd);
+  EXPECT_LT(fabs(mymax_ptexpt(std_f) - expected_md_f[0]), EPSILON);
+  EXPECT_LT(fabs(mymin_ptexpt(std_f) - expected_md_f[1]), EPSILON);
+  EXPECT_LT(fabs(myavg_ptexpt(std_f) - expected_md_f[2]), EPSILON);
+
+  // dp compute std v
+  // normalize virial by number of atoms
+  for (size_t ii = 0; ii < vmd.size(); ++ii) {
+    for (size_t jj = 0; jj < vmd[ii].size(); ++jj) {
+      vmd[ii][jj] /= VALUETYPE(atype.size());
+    }
+  }
+  std::vector<VALUETYPE> avg_v, std_v;
+  dp_md.compute_avg(avg_v, vmd);
+  dp_md.compute_std(std_v, avg_v, vmd, 1);
+  EXPECT_LT(fabs(mymax_ptexpt(std_v) - expected_md_v[0]), EPSILON);
+  EXPECT_LT(fabs(mymin_ptexpt(std_v) - expected_md_v[1]), EPSILON);
+  EXPECT_LT(fabs(mystd_ptexpt(std_v) - expected_md_v[2]), EPSILON);
+}
diff --git a/source/install/test_cc_local.sh b/source/install/test_cc_local.sh
index 9edf46f00e..0866eb45b8 100755
--- a/source/install/test_cc_local.sh
+++ b/source/install/test_cc_local.sh
@@ -71,6 +71,7 @@ else
   env ${_GEN_ENV} python ${INFER_SCRIPT_PATH}/gen_dpa2.py
   env ${_GEN_ENV} python ${INFER_SCRIPT_PATH}/gen_dpa3.py
   env ${_GEN_ENV} python ${INFER_SCRIPT_PATH}/gen_fparam_aparam.py
+  env ${_GEN_ENV} python ${INFER_SCRIPT_PATH}/gen_model_devi.py
 fi
 if [ "${ENABLE_PADDLE:-TRUE}" == "TRUE" ]; then
   PADDLE_INFERENCE_DIR=${BUILD_TMP_DIR}/paddle_inference_install_dir
diff --git a/source/tests/infer/gen_model_devi.py b/source/tests/infer/gen_model_devi.py
new file mode 100644
index 0000000000..cb8dbf7d97
--- /dev/null
+++ b/source/tests/infer/gen_model_devi.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-3.0-or-later
+"""Generate the model_devi_md0.pt2 and model_devi_md1.pt2 test models.
+
+Creates two SE(A) models with fparam/aparam + default_fparam, using different
+seeds so they produce different weights. This gives meaningful deviations for
+the DeepPotModelDevi tests. Prints precomputed reference values for the C++
+tests.
+"""
+
+import copy
+import os
+import sys
+
+import numpy as np
+
+# Ensure the source tree is on the path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+
+from gen_common import (
+    ensure_inductor_compiler,
+    load_custom_ops,
+)
+
+
+def main():
+    from deepmd.dpmodel.model.model import (
+        get_model,
+    )
+
+    ensure_inductor_compiler()
+
+    # ---- 1. Model config (SE(A) + fparam + aparam + default_fparam) ----
+    config = {
+        "type_map": ["O"],
+        "descriptor": {
+            "type": "se_e2_a",
+            "sel": [60],
+            "rcut": 6.0,
+            "rcut_smth": 1.8,
+            "neuron": [5, 10, 20],
+            "axis_neuron": 8,
+            "activation_function": "tanh",
+            "resnet_dt": False,
+            "type_one_side": True,
+            "exclude_types": [],
+            "set_davg_zero": False,
+            "precision": "default",
+            "trainable": True,
+            "seed": 1,
+        },
+        "fitting_net": {
+            "type": "ener",
+            "neuron": [5, 5, 5],
+            "activation_function": "tanh",
+            "resnet_dt": True,
+            "numb_fparam": 1,
+            "numb_aparam": 1,
+            "default_fparam": [0.25852028],
+            "precision": "default",
+            "seed": 1,
+            "atom_ener": [],
+            "rcond": 0.001,
+            "trainable": True,
+            "use_aparam_as_mask": False,
+        },
+    }
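+
+    # Note: "default_fparam" supplies a fallback frame parameter that the
+    # backend uses when the caller passes no fparam; the C++ tests assert
+    # has_default_fparam() on the ensemble and exercise that path.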
+
+    # ---- 2. Build two models with different seeds ----
+    from deepmd.pt.utils.serialization import (  # noqa: F401
+        deserialize_to_file,
+    )
+    from deepmd.pt_expt.utils.serialization import (
+        deserialize_to_file as pt_expt_deserialize_to_file,
+    )
+
+    # Load custom ops after the deepmd.pt import to avoid double registration
+    load_custom_ops()
+
+    base_dir = os.path.dirname(__file__)
+
+    models = []
+    for idx, seed in enumerate([1, 2]):
+        cfg = copy.deepcopy(config)
+        cfg["descriptor"]["seed"] = seed
+        cfg["fitting_net"]["seed"] = seed
+        model = get_model(cfg)
+        model_dict = model.serialize()
+        data = {
+            "model": model_dict,
+            "model_def_script": cfg,
+            "backend": "dpmodel",
+            "software": "deepmd-kit",
+            "version": "3.0.0",
+        }
+        pt2_path = os.path.join(base_dir, f"model_devi_md{idx}.pt2")
+        print(f"Exporting to {pt2_path} ...")  # noqa: T201
+        pt_expt_deserialize_to_file(pt2_path, copy.deepcopy(data))
+        models.append(pt2_path)
+
+    print("Export done.")  # noqa: T201
+
+    # ---- 3. Run inference via DeepPot ----
+    from deepmd.infer import (
+        DeepPot,
+    )
+
+    coord = np.array(
+        [
+            12.83,
+            2.56,
+            2.18,
+            12.09,
+            2.87,
+            2.74,
+            0.25,
+            3.32,
+            1.68,
+            3.36,
+            3.00,
+            1.81,
+            3.51,
+            2.51,
+            2.60,
+            4.27,
+            3.22,
+            1.56,
+        ],
+        dtype=np.float64,
+    )
+    atype = [0, 0, 0, 0, 0, 0]
+    box = np.array([13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0], dtype=np.float64)
+    fparam_val = np.array([0.25852028], dtype=np.float64)
+    aparam_val = np.array([0.25852028] * 6, dtype=np.float64)
+
+    # Inference with explicit fparam for both models
+    for idx, pt2_path in enumerate(models):
+        dp = DeepPot(pt2_path)
+
+        # With explicit fparam + aparam
+        e, f, v, ae, av = dp.eval(
+            coord,
+            box,
+            atype,
+            fparam=fparam_val,
+            aparam=aparam_val,
+            atomic=True,
+        )
+
+        # Note: the default_fparam path is tested at the C++ level;
+        # the Python pt2 runner filters None args, so it cannot be tested here.
+
+        atom_energy = ae[0, :, 0]
+        force = f[0]
+        atom_virial = av[0]
+
+        print(f"\n// ---- Model {idx} reference values (LAMMPS nlist path) ----")  # noqa: T201
+        print(f"// Total energy: {e[0, 0]:.18e}")  # noqa: T201
+        print()  # noqa: T201
+        print(f"  // model {idx}")  # noqa: T201
+        print("  std::vector<VALUETYPE> expected_e = {")  # noqa: T201
+        for ii, ev in enumerate(atom_energy):
+            comma = "," if ii < len(atom_energy) - 1 else ""
+            print(f"      {ev:.18e}{comma}")  # noqa: T201
+        print("  };")  # noqa: T201
+
+        print("  std::vector<VALUETYPE> expected_f = {")  # noqa: T201
+        force_flat = force.flatten()
+        for ii, fv in enumerate(force_flat):
+            comma = "," if ii < len(force_flat) - 1 else ""
+            print(f"      {fv:.18e}{comma}")  # noqa: T201
+        print("  };")  # noqa: T201
+
+        print("  std::vector<VALUETYPE> expected_v = {")  # noqa: T201
+        virial_flat = atom_virial.flatten()
+        for ii, vv in enumerate(virial_flat):
+            comma = "," if ii < len(virial_flat) - 1 else ""
+            print(f"      {vv:.18e}{comma}")  # noqa: T201
+        print("  };")  # noqa: T201
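+
+    # For reference, a vectorized sketch of the per-atom force deviation that
+    # the explicit loops in section 4 below compute (same f0/f1 arrays; kept
+    # as a comment so the script's printed output is unchanged):
+    #   fk = np.stack([f0[0], f1[0]])    # (nmodel, nloc, 3)
+    #   dev = fk - fk.mean(axis=0)       # deviation from the ensemble mean
+    #   std_f = np.sqrt(np.mean(np.sum(dev**2, axis=-1), axis=0))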
+
+    # ---- 4. Compute deviation stats (LAMMPS nlist) ----
+    dp0 = DeepPot(models[0])
+    dp1 = DeepPot(models[1])
+
+    e0, f0, v0, ae0, av0 = dp0.eval(
+        coord, box, atype, fparam=fparam_val, aparam=aparam_val, atomic=True
+    )
+    e1, f1, v1, ae1, av1 = dp1.eval(
+        coord, box, atype, fparam=fparam_val, aparam=aparam_val, atomic=True
+    )
+
+    nloc = len(atype)
+    nmodel = 2
+
+    # std_f: per-atom force std
+    f0_flat = f0[0].flatten()  # (nloc*3,)
+    f1_flat = f1[0].flatten()
+    std_f = np.zeros(nloc)
+    for ii in range(nloc):
+        avg_f = np.zeros(3)
+        for dd in range(3):
+            avg_f[dd] = (f0_flat[ii * 3 + dd] + f1_flat[ii * 3 + dd]) / nmodel
+        std_val = 0.0
+        for fk in [f0_flat, f1_flat]:
+            for dd in range(3):
+                tmp = fk[ii * 3 + dd] - avg_f[dd]
+                std_val += tmp * tmp
+        std_val /= nmodel
+        std_f[ii] = np.sqrt(std_val)
+
+    max_std_f = np.max(std_f)
+    min_std_f = np.min(std_f)
+    avg_std_f = np.mean(std_f)
+
+    print("\n// ---- Deviation stats ----")  # noqa: T201
+    print(f"// std_f: max={max_std_f:.18e} min={min_std_f:.18e} avg={avg_std_f:.18e}")  # noqa: T201
+
+    # std_v: per-component virial std (virial normalized by natoms)
+    v0_flat = v0[0].flatten() / nloc  # (9,)
+    v1_flat = v1[0].flatten() / nloc
+    std_v = np.zeros(9)
+    for ii in range(9):
+        avg_v = (v0_flat[ii] + v1_flat[ii]) / nmodel
+        std_val = 0.0
+        for vk in [v0_flat, v1_flat]:
+            tmp = vk[ii] - avg_v
+            std_val += tmp * tmp
+        std_val /= nmodel
+        std_v[ii] = np.sqrt(std_val)
+
+    max_std_v = np.max(std_v)
+    min_std_v = np.min(std_v)
+    # mystd: sqrt(mean(x^2))
+    mystd_v = np.sqrt(np.mean(std_v**2))
+
+    print(f"// std_v: max={max_std_v:.18e} min={min_std_v:.18e} mystd={mystd_v:.18e}")  # noqa: T201
+
+    print("\n  std::vector<VALUETYPE> expected_md_f = {")  # noqa: T201
+    print(f"      {max_std_f:.18e},")  # noqa: T201
+    print(f"      {min_std_f:.18e},")  # noqa: T201
+    print(f"      {avg_std_f:.18e}}};  // max min avg")  # noqa: T201
+    print("  std::vector<VALUETYPE> expected_md_v = {")  # noqa: T201
+    print(f"      {max_std_v:.18e},")  # noqa: T201
+    print(f"      {min_std_v:.18e},")  # noqa: T201
+    print(f"      {mystd_v:.18e}}};  // max min mystd")  # noqa: T201
+
+    print("\nDone!")  # noqa: T201
+
+
+if __name__ == "__main__":
+    main()