Skip to content

Add extrapolation level calculation #936

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 45 commits into from
Mar 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
cc0deb9
reading asi file
psn417 Nov 26, 2024
52a326d
Merge branch 'brucefan1983:master' into newest
psn417 Nov 26, 2024
b2a1b63
almost done, need debug
psn417 Nov 27, 2024
c026b3b
add dump
psn417 Nov 27, 2024
7f631fb
testing
psn417 Nov 27, 2024
e30c499
debug
psn417 Dec 1, 2024
2fa1b97
debug
psn417 Dec 2, 2024
3726fb9
change asi to float
psn417 Dec 2, 2024
c0be72e
Merge branch 'brucefan1983:master' into newest
psn417 Dec 2, 2024
12ec8f8
add doc
psn417 Dec 10, 2024
9588e08
Merge branch 'brucefan1983:master' into newest
psn417 Dec 10, 2024
2594abd
Merge branch 'brucefan1983:master' into newest
psn417 Dec 18, 2024
03bb8d9
Merge branch 'brucefan1983:master' into master
psn417 Jan 2, 2025
d88732d
Merge branch 'brucefan1983:master' into master
psn417 Jan 14, 2025
005b8ed
Merge branch 'brucefan1983:master' into master
psn417 Jan 24, 2025
fbf9476
debug
psn417 Jan 25, 2025
f77f47f
debug
psn417 Jan 26, 2025
d8a60e0
debug
psn417 Jan 30, 2025
d099bc4
Merge branch 'brucefan1983:master' into master
psn417 Feb 17, 2025
9244abd
Merge branch 'brucefan1983:master' into master
psn417 Feb 20, 2025
59c8480
Merge branch 'brucefan1983:master' into master
psn417 Mar 2, 2025
fe499e2
Merge branch 'master' of https://github.com/brucefan1983/GPUMD
psn417 Mar 2, 2025
78f5809
resolve problems
psn417 Mar 6, 2025
88d8bde
fix typo
psn417 Mar 6, 2025
5448ce4
fix typo
psn417 Mar 6, 2025
9c80271
Merge branch 'brucefan1983:master' into master
psn417 Mar 7, 2025
d8923e0
refactoring
psn417 Mar 7, 2025
1efe82f
Merge branch 'master' of https://github.com/psn417/GPUMD
psn417 Mar 7, 2025
e301bc9
debug
psn417 Mar 7, 2025
e63cad5
add name
psn417 Mar 7, 2025
38e32b6
speed up
psn417 Mar 7, 2025
e0921a6
Revert "speed up"
psn417 Mar 8, 2025
d8d4871
add cublas
psn417 Mar 8, 2025
a8ab142
Revert "speed up"
psn417 Mar 8, 2025
e613a5d
Merge branch 'cublas'
psn417 Mar 8, 2025
1b2fb1f
clean
psn417 Mar 8, 2025
5724a9d
add macros
psn417 Mar 8, 2025
67dd766
change include
psn417 Mar 8, 2025
54bf51c
change matrix
psn417 Mar 9, 2025
e2ffacd
change cuda version
psn417 Mar 10, 2025
146d4d5
better code
psn417 Mar 10, 2025
5960ce7
Merge branch 'master' of github.com:psn417/GPUMD
psn417 Mar 10, 2025
5ee9ee4
fix typo
psn417 Mar 10, 2025
0f9c250
remove space
psn417 Mar 17, 2025
9c647c6
fix bug
psn417 Mar 18, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/cuda-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ jobs:
build:
runs-on: ubuntu-latest
container:
image: nvidia/cuda:11.2.2-devel-ubuntu20.04 # 使用适合你的CUDA版本的官方NVIDIA容器
image: nvidia/cuda:12.2.2-devel-ubuntu22.04 # 使用适合你的CUDA版本的官方NVIDIA容器


steps:
- name: Checkout code
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/cuda_build_windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,12 @@ jobs:

- name: Install CUDA
run: |
choco install cuda --version=11.2.2.46133
choco install cuda --version=12.2.0.53625

- name: Build project
shell: cmd
run: |
set PATH=%PATH%;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\bin
set PATH=%PATH%;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.2\bin
nvcc --version
cd src && make

11 changes: 11 additions & 0 deletions doc/bibliography.rst
Original file line number Diff line number Diff line change
Expand Up @@ -273,3 +273,14 @@ Bibliography
| Computational Materials Science **207**, 111275 (2022)
| DOI: `10.1016/j.commatsci.2022.111275 <https://doi.org/10.1016/j.commatsci.2022.111275>`_

.. [Podryabinkin2023]
| Evgeny Podryabinkin, Kamil Garifullin, Alexander Shapeev, and Ivan Novikov
| *MLIP-3: Active learning on atomic environments with moment tensor potentials*
| J. Chem. Phys. **159**, 084112 (2023)
| DOI: `10.1063/5.0155887 <https://doi.org/10.1063/5.0155887>`_

.. [Lysogorskiy2023]
| Yury Lysogorskiy, Anton Bochkarev, Matous Mrovec, and Ralf Drautz
| *Active learning strategies for atomic cluster expansion models*
| Phys. Rev. Mater. **7**, 043801 (2023)
| DOI: `10.1103/PhysRevMaterials.7.043801 <https://doi.org/10.1103/PhysRevMaterials.7.043801>`_
44 changes: 44 additions & 0 deletions doc/gpumd/input_parameters/compute_extrapolation.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
.. _kw_compute_extrapolation:
.. index::
single: compute_extrapolation (keyword in run.in)

:attr:`compute_extrapolation`
=============================

This keyword is used to compute the extrapolation grade of structures in an NEP potential.

The extrapolation grade `gamma` can be considered the uncertainty of a structure relative to the training set.

A structure with large `gamma` tends to have higher energy and force errors.

Similar methods have been applied to MTP ([Podryabinkin2023]_) and ACE ([Lysogorskiy2023]_). You can refer to their papers for more details.

Before computing `gamma`, you need to obtain an `active set` from your training set. There are some Python scripts to do this at <https://github.com/psn417/nep_active>.

There are also some Python scripts to perform active learning automatically <https://github.com/psn417/nep_maker>.

Syntax
------

This keyword is used as follows::

compute_extrapolation asi_file <asi_file> gamma_low <gamma_low> gamma_high <gamma_high> check_interval <check_interval> dump_interval <dump_interval>

:attr:`asi_file` is the name of the Active Set Inversion (ASI) file. This file is generated by the Python script in <https://github.com/psn417/nep_active>.

:attr:`gamma_low`: Only if the max gamma value of a structure exceeds `gamma_low`, then the structure will be dumped into `extrapolation_dump.xyz` file. The default value is `0`.

:attr:`gamma_high`: If the max gamma value of a structure exceeds `gamma_high`, then the simulation will stop. The default value is very large so it will never stop.

:attr:`check_interval`: Since calculating gamma value is slow, you can check the gamma value every `check_interval` steps. The default value is `1` (check every step).

:attr:`dump_interval`: You can set the minimum interval between dumps to `dump_interval` steps. The default value is `1`.

Example
-------

.. code::

compute_extrapolation asi_file active_set.asi gamma_low 5 gamma_high 10 check_interval 10 dump_interval 10

This means that the structures with max gamma between 5-10 will be dumped. The gamma value will be checked every 10 steps.
1 change: 1 addition & 0 deletions doc/gpumd/input_parameters/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ Below you can find a listing of keywords for the ``run.in`` input file.
velocity
correct_velocity
potential
compute_extrapolation
dftd3
change_box
deform
Expand Down
83 changes: 65 additions & 18 deletions src/force/nep.cu
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,11 @@ heat transport, Phys. Rev. B. 104, 104309 (2021).
#include "utilities/error.cuh"
#include "utilities/gpu_macro.cuh"
#include "utilities/nep_utilities.cuh"
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <cstring>

const std::string ELEMENTS[NUM_ELEMENTS] = {
"H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si", "P", "S",
Expand Down Expand Up @@ -357,6 +357,7 @@ NEP::NEP(const char* file_potential, const int num_atoms)
#endif

initialize_dftd3();
B_projection_size = annmb.num_neurons1 * (annmb.dim + 2);
}

NEP::~NEP(void)
Expand Down Expand Up @@ -587,7 +588,10 @@ static __global__ void find_descriptor(
double* g_pe,
float* g_Fp,
double* g_virial,
float* g_sum_fxyz)
float* g_sum_fxyz,
bool need_B_projection,
double* B_projection,
int B_projection_size)
{
int n1 = blockIdx.x * blockDim.x + threadIdx.x + N1;
if (n1 < N2) {
Expand Down Expand Up @@ -695,7 +699,8 @@ static __global__ void find_descriptor(
accumulate_s(paramb.L_max, d12, x12, y12, z12, gn12, s);
#endif
}
find_q(paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
find_q(
paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
for (int abc = 0; abc < NUM_OF_ABC; ++abc) {
g_sum_fxyz[(n * NUM_OF_ABC + abc) * N + n1] = s[abc];
}
Expand Down Expand Up @@ -747,16 +752,29 @@ static __global__ void find_descriptor(
F,
Fp);
} else {
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp);
if (!need_B_projection)
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp);
else
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp,
B_projection + n1 * B_projection_size);
}
g_pe[n1] += F;

Expand Down Expand Up @@ -979,7 +997,18 @@ static __global__ void find_partial_force_angular(
g_gn_angular[index_left_all] * weight_left + g_gn_angular[index_right_all] * weight_right;
float gnp12 = g_gnp_angular[index_left_all] * weight_left +
g_gnp_angular[index_right_all] * weight_right;
accumulate_f12(paramb.L_max, paramb.num_L, n, paramb.n_max_angular + 1, d12, r12, gn12, gnp12, Fp, sum_fxyz, f12);
accumulate_f12(
paramb.L_max,
paramb.num_L,
n,
paramb.n_max_angular + 1,
d12,
r12,
gn12,
gnp12,
Fp,
sum_fxyz,
f12);
}
#else
float fc12, fcp12;
Expand Down Expand Up @@ -1007,7 +1036,18 @@ static __global__ void find_partial_force_angular(
gn12 += fn12[k] * annmb.c[c_index];
gnp12 += fnp12[k] * annmb.c[c_index];
}
accumulate_f12(paramb.L_max, paramb.num_L, n, paramb.n_max_angular + 1, d12, r12, gn12, gnp12, Fp, sum_fxyz, f12);
accumulate_f12(
paramb.L_max,
paramb.num_L,
n,
paramb.n_max_angular + 1,
d12,
r12,
gn12,
gnp12,
Fp,
sum_fxyz,
f12);
}
#endif
g_f12x[index] = f12[0];
Expand Down Expand Up @@ -1234,7 +1274,10 @@ void NEP::compute_large_box(
potential_per_atom.data(),
nep_data.Fp.data(),
virial_per_atom.data(),
nep_data.sum_fxyz.data());
nep_data.sum_fxyz.data(),
need_B_projection,
B_projection,
B_projection_size);
GPU_CHECK_KERNEL

bool is_dipole = paramb.model_type == 1;
Expand Down Expand Up @@ -1416,7 +1459,10 @@ void NEP::compute_small_box(
potential_per_atom.data(),
nep_data.Fp.data(),
virial_per_atom.data(),
nep_data.sum_fxyz.data());
nep_data.sum_fxyz.data(),
need_B_projection,
B_projection,
B_projection_size);
GPU_CHECK_KERNEL

bool is_dipole = paramb.model_type == 1;
Expand Down Expand Up @@ -1703,7 +1749,8 @@ static __global__ void find_descriptor(
accumulate_s(paramb.L_max, d12, x12, y12, z12, gn12, s);
#endif
}
find_q(paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
find_q(
paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
for (int abc = 0; abc < NUM_OF_ABC; ++abc) {
g_sum_fxyz[(n * NUM_OF_ABC + abc) * N + n1] = s[abc];
}
Expand Down
72 changes: 56 additions & 16 deletions src/force/nep_small_box.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@
#include "model/box.cuh"
#include "nep.cuh"
#include "utilities/common.cuh"
#include "utilities/nep_utilities.cuh"
#include "utilities/gpu_macro.cuh"
#include "utilities/nep_utilities.cuh"

#ifdef USE_KEPLER
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
Expand Down Expand Up @@ -166,7 +166,10 @@ static __global__ void find_descriptor_small_box(
double* g_pe,
float* g_Fp,
double* g_virial,
float* g_sum_fxyz)
float* g_sum_fxyz,
bool need_B_projection,
double* B_projection,
int B_projection_size)
{
int n1 = blockIdx.x * blockDim.x + threadIdx.x + N1;
if (n1 < N2) {
Expand Down Expand Up @@ -263,7 +266,8 @@ static __global__ void find_descriptor_small_box(
accumulate_s(paramb.L_max, d12, r12[0], r12[1], r12[2], gn12, s);
#endif
}
find_q(paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
find_q(
paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
for (int abc = 0; abc < NUM_OF_ABC; ++abc) {
g_sum_fxyz[(n * NUM_OF_ABC + abc) * N + n1] = s[abc];
}
Expand Down Expand Up @@ -311,16 +315,29 @@ static __global__ void find_descriptor_small_box(
F,
Fp);
} else {
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp);
if (!need_B_projection)
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp);
else
apply_ann_one_layer(
annmb.dim,
annmb.num_neurons1,
annmb.w0[t1],
annmb.b0[t1],
annmb.w1[t1],
annmb.b1,
q,
F,
Fp,
B_projection + n1 * B_projection_size);
}
g_pe[n1] += F;

Expand Down Expand Up @@ -452,7 +469,8 @@ static __global__ void find_descriptor_small_box(
accumulate_s(paramb.L_max, d12, r12[0], r12[1], r12[2], gn12, s);
#endif
}
find_q(paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
find_q(
paramb.L_max, paramb.num_L, paramb.n_max_angular + 1, n, s, q + (paramb.n_max_radial + 1));
for (int abc = 0; abc < NUM_OF_ABC; ++abc) {
g_sum_fxyz[(n * NUM_OF_ABC + abc) * N + n1] = s[abc];
}
Expand Down Expand Up @@ -665,7 +683,18 @@ static __global__ void find_force_angular_small_box(
g_gn_angular[index_left_all] * weight_left + g_gn_angular[index_right_all] * weight_right;
float gnp12 = g_gnp_angular[index_left_all] * weight_left +
g_gnp_angular[index_right_all] * weight_right;
accumulate_f12(paramb.L_max, paramb.num_L, n, paramb.n_max_angular + 1, d12, r12, gn12, gnp12, Fp, sum_fxyz, f12);
accumulate_f12(
paramb.L_max,
paramb.num_L,
n,
paramb.n_max_angular + 1,
d12,
r12,
gn12,
gnp12,
Fp,
sum_fxyz,
f12);
}
#else
float fc12, fcp12;
Expand All @@ -692,7 +721,18 @@ static __global__ void find_force_angular_small_box(
gn12 += fn12[k] * annmb.c[c_index];
gnp12 += fnp12[k] * annmb.c[c_index];
}
accumulate_f12(paramb.L_max, paramb.num_L, n, paramb.n_max_angular + 1, d12, r12, gn12, gnp12, Fp, sum_fxyz, f12);
accumulate_f12(
paramb.L_max,
paramb.num_L,
n,
paramb.n_max_angular + 1,
d12,
r12,
gn12,
gnp12,
Fp,
sum_fxyz,
f12);
}
#endif
double s_sxx = 0.0;
Expand Down
Loading