Add testing utilities to perform finite differences for operations
- Add finite difference test for add operation
- Add dispatch functions to add tests for torch array backend

Co-authored-by: Divya Suman <[email protected]>
agoscinski and DivyaSuman14 committed Feb 5, 2024
1 parent 111f0f9 commit 0246ca9
Showing 6 changed files with 236 additions and 0 deletions.
@@ -60,3 +60,5 @@
from .subtract import subtract # noqa
from .unique_metadata import unique_metadata, unique_metadata_block # noqa
from .zeros_like import zeros_like, zeros_like_block # noqa

from . import _testing # noqa
14 changes: 14 additions & 0 deletions python/metatensor-operations/metatensor/operations/_dispatch.py
@@ -46,6 +46,20 @@ def _check_all_np_ndarray(arrays):
)


def sum(array, axis: Optional[int] = None):
"""
    Returns the sum of the elements in the array along the given axis.
    It is equivalent to np.sum(array, axis=axis) and torch.sum(tensor, dim=axis).
"""
if isinstance(array, TorchTensor):
return torch.sum(array, dim=axis)
elif isinstance(array, np.ndarray):
return np.sum(array, axis=axis).astype(array.dtype)
else:
raise TypeError(UNKNOWN_ARRAY_TYPE)


def abs(array):
"""
Returns the absolute value of the elements in the array.
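A minimal sketch of how the new helper dispatches between backends, assuming
numpy (and optionally torch) is installed; metatensor.operations._dispatch is
the private module named in the file header above:

import numpy as np

from metatensor.operations import _dispatch

# numpy input dispatches to np.sum and preserves the dtype
array = np.arange(6, dtype=np.float64).reshape(2, 3)
print(_dispatch.sum(array, axis=1))  # [ 3. 12.]

try:
    import torch

    # torch input dispatches to torch.sum(..., dim=axis)
    tensor = torch.arange(6, dtype=torch.float64).reshape(2, 3)
    print(_dispatch.sum(tensor, axis=1))  # tensor([ 3., 12.], dtype=torch.float64)
except ImportError:
    pass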
@@ -0,0 +1,4 @@
from ._creation_operation import cartesian_cubic, cartesian_linear
from ._grad import finite_differences

__all__ = ["cartesian_cubic", "cartesian_linear", "finite_differences"]
@@ -0,0 +1,93 @@
from typing import Union

import numpy as np

from .. import _dispatch
from .._classes import Labels, TensorBlock, TensorMap
from .._dispatch import TorchTensor
from ..block_from_array import block_from_array


def cartesian_cubic(
cartesian_vector: Union[np.ndarray, TorchTensor], compute_grad: bool = False
) -> TensorMap:
"""
Creates a tensor map from a set of Cartesian vectors together with gradients if
:param compute_grad: is `True` according to the function:
.. math::
f(x, y, z) = x^3 + y^3 + z^3
\\nabla f = (3x^2, 3y^2, 3z^2)
:param cartesian_vector: Set of Cartesian vectors with shape (n_samples, 3)
:param compute_grad: Specifies if the returned tensor map should contain the
gradients
"""

cartesian_vector_cubic = cartesian_vector**3
values = _dispatch.sum(cartesian_vector_cubic, axis=1).reshape(-1, 1)
if compute_grad:
values_grad = _dispatch.zeros_like(cartesian_vector, (len(values), 3, 1))
values_grad[:, 0] = 3 * cartesian_vector[:, 0:1] ** 2
values_grad[:, 1] = 3 * cartesian_vector[:, 1:2] ** 2
values_grad[:, 2] = 3 * cartesian_vector[:, 2:3] ** 2

block = block_from_array(values)
if compute_grad:
block.add_gradient(
parameter="positions",
gradient=TensorBlock(
values=values_grad,
samples=Labels.range("sample", len(values)),
components=[Labels.range("cartesian", 3)],
properties=block.properties,
),
)
return TensorMap(Labels.range("_", 1), [block])


def cartesian_linear(
cartesian_vector: Union[np.ndarray, TorchTensor], compute_grad: bool = False
) -> TensorMap:
"""
Creates a tensor map from a set of Cartesian vectors together with gradients if
:param compute_grad: is `True` according to the function:
.. math::
f(x, y, z) = 3x + 2y + 8*z + 4
\\nabla f = (3, 2, 8)
:param cartesian_vector: Set of Cartesian vectors with shape (n_samples, 3)
:param compute_grad: Specifies if the returned tensor map should contain the
gradients
"""

cartesian_vector_linear = (
3 * cartesian_vector[:, 0]
+ 2 * cartesian_vector[:, 1]
+ 8 * cartesian_vector[:, 2]
+ 4
)
values = cartesian_vector_linear.reshape(-1, 1)
if compute_grad:
values_grad = _dispatch.zeros_like(cartesian_vector, (len(values), 3, 1))
values_grad[:, 0] = 3 * _dispatch.ones_like(cartesian_vector, (len(values), 1))
values_grad[:, 1] = 2 * _dispatch.ones_like(cartesian_vector, (len(values), 1))
values_grad[:, 2] = 8 * _dispatch.ones_like(cartesian_vector, (len(values), 1))

block = block_from_array(values)
if compute_grad:
block.add_gradient(
parameter="positions",
gradient=TensorBlock(
values=values_grad,
samples=Labels.range("sample", len(values)),
components=[Labels.range("cartesian", 3)],
properties=block.properties,
),
)
return TensorMap(Labels.range("_", 1), [block])
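A quick illustration of the shapes these helpers produce (a sketch, relying
only on the TensorMap accessors already used elsewhere in this diff):

import numpy as np

from metatensor.operations._testing import cartesian_cubic

vectors = np.random.rand(5, 3)
tensor = cartesian_cubic(vectors, compute_grad=True)

# a single block holding one scalar value f(x, y, z) per sample
block = tensor[0]
print(block.values.shape)  # (5, 1)

# the gradient block stores the three Cartesian directions as a component
gradient = block.gradient("positions")
print(gradient.values.shape)  # (5, 3, 1)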
@@ -0,0 +1,78 @@
from typing import Callable, Union

import numpy as np
from numpy.testing import assert_allclose

from .. import _dispatch
from .._classes import TensorMap
from .._dispatch import TorchTensor


def finite_differences(
function: Callable[[Union[np.ndarray, TorchTensor], bool], TensorMap],
input_array: Union[np.ndarray, TorchTensor],
parameter: str = "positions",
displacement: float = 1e-6,
rtol: float = 1e-5,
atol: float = 1e-16,
) -> None:
"""
Check that analytical gradients with respect to the :param parameter: agree with a
finite difference calculation of the gradients. The callable must be able to return
the analtyical gradients optionally if input argument `compute_grad` is true so it
can be tested here. The dimension of the gradients are supposed to be in the
components. For example if the gradients are taken with respect to Cartesian
coordinates the :param function: outputs a tensor map of gradients_with 3
components.
:param function: a function that outputs a tensor map (with gradients if specified
by input parameter `compute_grad`) from the :param input_array:.
:param input_array: an input for which the analytical and numerical gradients are
tested
:param parameter: the parameter of the gradient that is checked
:param displacement: distance each atom will be displaced in each direction when
computing finite differences
:param max_relative: Maximal relative error. ``10 * displacement`` is a good
starting point
:param atol: Threshold below which all values are considered zero. This should be
very small (1e-16) to prevent false positives (if all values & gradients are
below that threshold, tests will pass even with wrong gradients)
:raises AssertionError: if the two gradients are not equal up to specified precision
"""
reference = function(input_array, compute_grad=True)
dim_gradients = len(reference[0].gradient(parameter).components)
for spatial in range(dim_gradients):
input_pos = _dispatch.copy(input_array)
input_pos[:, spatial] += displacement / 2
updated_pos = function(input_pos)

input_neg = _dispatch.copy(input_array)
input_neg[:, spatial] -= displacement / 2
updated_neg = function(input_neg)

assert updated_pos.keys == reference.keys
assert updated_neg.keys == reference.keys

for key, block in reference.items():
gradients = block.gradient(parameter)

block_pos = updated_pos.block(key)
block_neg = updated_neg.block(key)

for gradient_i, sample_labels in enumerate(gradients.samples):
sample_i = sample_labels[0]

# check that the sample is the same in both descriptors
assert block_pos.samples[sample_i] == block.samples[sample_i]
assert block_neg.samples[sample_i] == block.samples[sample_i]

value_pos = block_pos.values[sample_i]
value_neg = block_neg.values[sample_i]
gradient = gradients.values[gradient_i, spatial]

assert value_pos.shape == gradient.shape
assert value_neg.shape == gradient.shape

finite_difference = (value_pos - value_neg) / displacement

assert_allclose(finite_difference, gradient, rtol=rtol, atol=atol)
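For reference, the loop above evaluates the symmetric (central) difference
quotient, displacing each Cartesian direction i by plus and minus half of
``displacement`` (h below), so the truncation error is second order in h:

.. math::
    \frac{\partial f}{\partial x_i} \approx
    \frac{f(x + \frac{h}{2} e_i) - f(x - \frac{h}{2} e_i)}{h},
    \qquad \text{error } \mathcal{O}(h^2)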
45 changes: 45 additions & 0 deletions python/metatensor-operations/tests/add.py
@@ -3,6 +3,30 @@

import metatensor
from metatensor import Labels, TensorBlock, TensorMap
from metatensor.operations._testing import (
cartesian_cubic,
cartesian_linear,
finite_differences,
)


try:
import torch # noqa

HAS_TORCH = True
except ImportError:
HAS_TORCH = False


@pytest.fixture(scope="module", autouse=True)
def set_random_generator():
"""Set the random generator to same seed before each test is run.
Otherwise test behaviour is dependend on the order of the tests
in this file and the number of parameters of the test.
"""
np.random.seed(1225787)
if HAS_TORCH:
torch.manual_seed(1225787)


@pytest.fixture
@@ -258,3 +282,24 @@ def test_self_add_error():
)
with pytest.raises(TypeError, match=message):
metatensor.add(tensor, np.ones((3, 4)))


def test_add_finite_difference():
def add_callable(cartesian_vectors, compute_grad=False):
tensor1 = cartesian_linear(cartesian_vectors, compute_grad)
tensor2 = cartesian_cubic(cartesian_vectors, compute_grad)
return metatensor.add(tensor1, tensor2)

input_array = np.random.rand(5, 3)
finite_differences(add_callable, input_array, "positions")


@pytest.mark.skipif(not HAS_TORCH, reason="requires torch")
def test_torch_add_finite_difference():
def add_callable(cartesian_vectors, compute_grad=False):
tensor1 = cartesian_linear(cartesian_vectors, compute_grad)
tensor2 = cartesian_cubic(cartesian_vectors, compute_grad)
return metatensor.add(tensor1, tensor2)

input_array = torch.rand(5, 3, dtype=torch.float64)
finite_differences(add_callable, input_array, "positions")
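A note on the dtype choice above: with the default ``displacement`` of 1e-6,
single precision (machine epsilon around 1.2e-7) would leave almost no
significant digits in the difference ``value_pos - value_neg``, so drawing the
torch inputs as ``torch.float64`` is what keeps the 1e-5 relative tolerance
meaningful.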
