Skip to content

Commit

Permalink
Add warning for mGGA (#28)
Browse files Browse the repository at this point in the history
  • Loading branch information
mfherbst authored Jan 30, 2023
1 parent d176ced commit 572c614
Show file tree
Hide file tree
Showing 6 changed files with 58 additions and 27 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.3.13
current_version = 0.3.14
commit = True
tag = False

Expand Down
3 changes: 2 additions & 1 deletion .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ julia/1.8-n2:
- if: $CI_PIPELINE_SOURCE == "external_pull_request_event"
when: manual
script:
- module load lang/JuliaHPC/1.8.3-foss-2022a-CUDA-11.7.0
# - module load lang/JuliaHPC/1.8.3-foss-2022a-CUDA-11.7.0
- module load lang/Julia/1.8.3-linux-x86_64
- julia --color=yes --project=. -e '
using Pkg;
Pkg.test(; coverage=true, test_args=["gpu"])
Expand Down
4 changes: 3 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
name = "Libxc"
uuid = "66e17ffc-8502-11e9-23b5-c9248d0eb96d"
authors = ["Jason Eu <[email protected]>", "Michael F. Herbst <[email protected]>"]
version = "0.3.13"
version = "0.3.14"

[deps]
CUDA_Runtime_jll = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
Libxc_GPU_jll = "25af9330-9b41-55d4-a324-1a83c0a0a1ac"
Libxc_jll = "a56a6d9d-ad03-58af-ab61-878bf78270d6"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"

[compat]
CUDA_Runtime_jll = "0.2"
Libxc_GPU_jll = "6"
Libxc_jll = "6"
Requires = "1"
Expand Down
4 changes: 2 additions & 2 deletions src/Libxc.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ const libxc_version = VersionNumber(XC_VERSION)
const libxc_doi = unsafe_string(Libxc.xc_reference_doi())

"""Is the CUDA version of libxc available on this platform"""
has_cuda() = isdefined(Libxc_GPU_jll, :libxc)
has_cuda() = Libxc_GPU_jll.is_available()

"""Return the list of available libxc functionals as strings"""
function available_functionals()
Expand All @@ -35,7 +35,7 @@ export needs_laplacian

function __init__()
@require CUDA="052768ef-5323-5732-b1bb-66c8b64840ba" begin
@static if isdefined(Libxc_GPU_jll, :libxc)
@static if Libxc_GPU_jll.is_available()
include("evaluate_gpu.jl")
end
end
Expand Down
45 changes: 25 additions & 20 deletions src/evaluate_gpu.jl
Original file line number Diff line number Diff line change
Expand Up @@ -143,19 +143,24 @@ function evaluate!(func::Functional, ::Union{Val{:mgga},Val{:hyb_mgga}}, rho::Cu
v4lapltau3::OptCuArray=CU_NULL,
v4tau4::OptCuArray=CU_NULL)
np = Int(length(rho) / func.spin_dimensions.rho)
@warn "meta-GGAs on GPU seem to be broken at least with Libxc 6.1.0"

pointer = allocate_gpufunctional(func)
@ccall libxc_gpu.xc_gga(
pointer::Ptr{xc_func_type}, np::Csize_t, rho::CuPtr{Cdouble}, sigma::CuPtr{Cdouble},
@ccall libxc_gpu.xc_mgga(
pointer::Ptr{xc_func_type}, np::Csize_t,
rho::CuPtr{Cdouble}, sigma::CuPtr{Cdouble},
lapl::CuPtr{Cdouble}, tau::CuPtr{Cdouble},
#
zk::CuPtr{Cdouble}, vrho::CuPtr{Cdouble}, vsigma::CuPtr{Cdouble},
vlapl::CuPtr{Cdouble}, vtau::CuPtr{Cdouble},
#
v2rho2::CuPtr{Cdouble}, v2rhosigma::CuPtr{Cdouble},
v2rholapl::CuPtr{Cdouble}, v2rhotau::CuPtr{Cdouble},
v2sigma2::CuPtr{Cdouble}, v2sigmalapl::CuPtr{Cdouble},
v2sigmatau::CuPtr{Cdouble}, v2lapl2::CuPtr{Cdouble},
v2lapltau::CuPtr{Cdouble}, v2tau2::CuPtr{Cdouble},
v3rho3::CuPtr{Cdouble}, v3rho2sigma::CuPtr{Cdouble},
#
v3rho2lapl::CuPtr{Cdouble}, v3rho2tau::CuPtr{Cdouble},
v3rhosigma2::CuPtr{Cdouble}, v3rhosigmalapl::CuPtr{Cdouble},
v3rhosigmatau::CuPtr{Cdouble}, v3rholapl2::CuPtr{Cdouble},
Expand All @@ -166,24 +171,24 @@ function evaluate!(func::Functional, ::Union{Val{:mgga},Val{:hyb_mgga}}, rho::Cu
v3lapl3::CuPtr{Cdouble}, v3lapl2tau::CuPtr{Cdouble},
v3lapltau2::CuPtr{Cdouble}, v3tau3::CuPtr{Cdouble},
v4rho4::CuPtr{Cdouble}, v4rho3sigma::CuPtr{Cdouble},
v4rho3lapl::CuPtr{Cdouble}, v4rho3tau::CuPtr{Cdouble},
v4rho2sigma2::CuPtr{Cdouble}, v4rho2sigmalapl::CuPtr{Cdouble},
v4rho2sigmatau::CuPtr{Cdouble}, v4rho2lapl2::CuPtr{Cdouble},
v4rho2lapltau::CuPtr{Cdouble}, v4rho2tau2::CuPtr{Cdouble},
v4rhosigma3::CuPtr{Cdouble}, v4rhosigma2lapl::CuPtr{Cdouble},
v4rhosigma2tau::CuPtr{Cdouble}, v4rhosigmalapl2::CuPtr{Cdouble},
v4rhosigmalapltau::CuPtr{Cdouble}, v4rhosigmatau2::CuPtr{Cdouble},
v4rholapl3::CuPtr{Cdouble}, v4rholapl2tau::CuPtr{Cdouble},
v4rholapltau2::CuPtr{Cdouble}, v4rhotau3::CuPtr{Cdouble},
v4sigma4::CuPtr{Cdouble}, v4sigma3lapl::CuPtr{Cdouble},
v4sigma3tau::CuPtr{Cdouble}, v4sigma2lapl2::CuPtr{Cdouble},
v4sigma2lapltau::CuPtr{Cdouble}, v4sigma2tau2::CuPtr{Cdouble},
v4sigmalapl3::CuPtr{Cdouble}, v4sigmalapl2tau::CuPtr{Cdouble},
v4sigmalapltau2::CuPtr{Cdouble}, v4sigmatau3::CuPtr{Cdouble},
v4lapl4::CuPtr{Cdouble}, v4lapl3tau::CuPtr{Cdouble},
v4lapl2tau2::CuPtr{Cdouble}, v4lapltau3::CuPtr{Cdouble},
v4tau4::CuPtr{Cdouble}
v4rho3lapl::CuPtr{Cdouble},
#
v4rho3tau::CuPtr{Cdouble}, v4rho2sigma2::CuPtr{Cdouble},
v4rho2sigmalapl::CuPtr{Cdouble}, v4rho2sigmatau::CuPtr{Cdouble},
v4rho2lapl2::CuPtr{Cdouble}, v4rho2lapltau::CuPtr{Cdouble},
v4rho2tau2::CuPtr{Cdouble}, v4rhosigma3::CuPtr{Cdouble},
v4rhosigma2lapl::CuPtr{Cdouble}, v4rhosigma2tau::CuPtr{Cdouble},
v4rhosigmalapl2::CuPtr{Cdouble}, v4rhosigmalapltau::CuPtr{Cdouble},
v4rhosigmatau2::CuPtr{Cdouble}, v4rholapl3::CuPtr{Cdouble},
v4rholapl2tau::CuPtr{Cdouble}, v4rholapltau2::CuPtr{Cdouble},
v4rhotau3::CuPtr{Cdouble}, v4sigma4::CuPtr{Cdouble},
v4sigma3lapl::CuPtr{Cdouble}, v4sigma3tau::CuPtr{Cdouble},
v4sigma2lapl2::CuPtr{Cdouble}, v4sigma2lapltau::CuPtr{Cdouble},
v4sigma2tau2::CuPtr{Cdouble}, v4sigmalapl3::CuPtr{Cdouble},
v4sigmalapl2tau::CuPtr{Cdouble}, v4sigmalapltau2::CuPtr{Cdouble},
v4sigmatau3::CuPtr{Cdouble}, v4lapl4::CuPtr{Cdouble},
v4lapl3tau::CuPtr{Cdouble}, v4lapl2tau2::CuPtr{Cdouble},
v4lapltau3::CuPtr{Cdouble}, v4tau4::CuPtr{Cdouble},
)::Cvoid

deallocate_gpufunctional(pointer)
end
27 changes: 25 additions & 2 deletions test/gpu.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ using Libxc
using CUDA

@testset "Properties" begin
@test CUDA.has_cuda()
@test Libxc.has_cuda()
end

@testset "Compare LDA on CPU/GPU" begin
Expand Down Expand Up @@ -33,8 +33,31 @@ end
sigma_d = convert(CuArray, sigma)
res_d = evaluate(Functional(sym; n_spin); rho=rho_d, sigma=sigma_d)

@test maximum(abs, res.zk - Array(res_d.zk)) < 1e-12
@test maximum(abs, res.zk - Array(res_d.zk)) < 1e-12
@test maximum(abs, res.vrho - Array(res_d.vrho)) < 1e-12
@test maximum(abs, res.vsigma - Array(res_d.vsigma)) < 1e-12
end
end


@testset "Compare mGGA without Laplacian" begin
n_p = 12

for n_spin in (1, 2), sym in (:mgga_x_tpss, )
rho = abs.(randn(n_spin, n_p))
sigma = abs.(randn(2n_spin-1, n_p))
tau = abs.(randn(n_spin, n_p))
res = evaluate(Functional(sym; n_spin); rho, sigma, tau)

rho_d = convert(CuArray, rho)
sigma_d = convert(CuArray, sigma)
tau_d = convert(CuArray, tau)
res_d = evaluate(Functional(sym; n_spin);
rho=rho_d, sigma=sigma_d, tau=tau_d)

@test_broken maximum(abs, res.zk - Array(res_d.zk)) < 1e-12
@test_broken maximum(abs, res.vrho - Array(res_d.vrho)) < 1e-12
@test_broken maximum(abs, res.vsigma - Array(res_d.vsigma)) < 1e-12
@test_broken maximum(abs, res.vtau - Array(res_d.vtau)) < 1e-12
end
end

2 comments on commit 572c614

@mfherbst
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JuliaRegistrator
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Registration pull request created: JuliaRegistries/General/76676

After the above pull request is merged, it is recommended to create a tag on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v0.3.14 -m "<description of version>" 572c61426cc5ee86a0ed0253c8fbd3168c4c5ddd
git push origin v0.3.14

Please sign in to comment.