-Module containing the :py:class:`AbsoluteError` and :py:class:`SquaredError` classes.
+Module containing the :py:func:`absolute_error` and :py:func:`squared_error` functions.
 """

-from .._backend import TensorMap
-from .module_map import ModuleMap
+import torch
+
+from .._backend import TensorBlock, TensorMap, equal_metadata, equal_metadata_block


-class AbsoluteError(ModuleMap):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, A: TensorMap, B: TensorMap) -> TensorMap:
-        pass
+def absolute_error(A: TensorMap, B: TensorMap) -> TensorMap:
+    if not equal_metadata(A, B):
+        raise ValueError(
+            "The two maps must have the same metadata in `absolute_error`."
+        )
+
+    # Compute the error block by block; A and B share their metadata, so A's
+    # keys also index the new blocks.
+    blocks = []
+    for key, block_A in A.items():
+        block_B = B.block(key)
+        blocks.append(absolute_error_block(block_A, block_B))
+
+    return TensorMap(A.keys, blocks)


-class SquaredError(ModuleMap):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, A: TensorMap, B: TensorMap) -> TensorMap:
-        pass
+def absolute_error_block(A: TensorBlock, B: TensorBlock) -> TensorBlock:
+    if not equal_metadata_block(A, B):
+        raise ValueError(
+            "The two blocks must have the same metadata in `absolute_error_block`."
+        )
+
+    # Element-wise |A - B|, keeping A's samples, components and properties.
+    values = torch.abs(A.values - B.values)
+    block = TensorBlock(
+        values=values,
+        samples=A.samples,
+        components=A.components,
+        properties=A.properties,
+    )
+    # Apply the same error recursively to every gradient block.
+    for gradient_name, gradient_A in A.gradients():
+        gradient_B = B.gradient(gradient_name)
+        block.add_gradient(
+            gradient_name,
+            absolute_error_block(gradient_A, gradient_B),
+        )
+
+    return block
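For reference, a minimal usage sketch of the new `absolute_error` path follows. It is not part of the change: it assumes the torch flavour of metatensor (`metatensor.torch`) behind `.._backend`, and the `_scalar_map` helper plus the example values are hypothetical, for illustration only.

import torch
from metatensor.torch import Labels, TensorBlock, TensorMap

def _scalar_map(values: torch.Tensor) -> TensorMap:
    # Hypothetical helper: a single-block map with one key, no components,
    # and one property per column of `values`.
    block = TensorBlock(
        values=values,
        samples=Labels.range("sample", values.shape[0]),
        components=[],
        properties=Labels.range("property", values.shape[1]),
    )
    return TensorMap(Labels.range("_", 1), [block])

A = _scalar_map(torch.tensor([[1.0], [2.0], [3.0]]))
B = _scalar_map(torch.tensor([[1.5], [2.0], [2.0]]))

# `absolute_error` imported from this module
abs_err = absolute_error(A, B)
print(abs_err.block(0).values)  # [[0.5], [0.0], [1.0]]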
+def squared_error(A: TensorMap, B: TensorMap) -> TensorMap:
+    if not equal_metadata(A, B):
+        raise ValueError("The two maps must have the same metadata in `squared_error`.")
+
+    # Compute the error block by block; A and B share their metadata, so A's
+    # keys also index the new blocks.
+    blocks = []
+    for key, block_A in A.items():
+        block_B = B.block(key)
+        blocks.append(squared_error_block(block_A, block_B))
+
+    return TensorMap(A.keys, blocks)
+
+
+def squared_error_block(A: TensorBlock, B: TensorBlock) -> TensorBlock:
+    if not equal_metadata_block(A, B):
+        raise ValueError(
+            "The two blocks must have the same metadata in `squared_error_block`."
+        )
+
+    # Element-wise (A - B)**2, keeping A's samples, components and properties.
+    values = (A.values - B.values) ** 2
+    block = TensorBlock(
+        values=values,
+        samples=A.samples,
+        components=A.components,
+        properties=A.properties,
+    )
+    # Apply the same error recursively to every gradient block.
+    for gradient_name, gradient_A in A.gradients():
+        gradient_B = B.gradient(gradient_name)
+        block.add_gradient(
+            gradient_name,
+            squared_error_block(gradient_A, gradient_B),
+        )
+
+    return block
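Both functions return element-wise errors carrying the full metadata of the inputs rather than a reduced number, so a caller that wants a single scalar to backpropagate through still has to reduce over blocks itself. A sketch of one possible reduction, reusing the hypothetical `A` and `B` maps from the example above:

sq_err = squared_error(A, B)

# Pool every value of every block and take the mean: a plain MSE over the
# whole map. Gradients flow back through `values` if the inputs require grad.
mse = torch.mean(torch.cat([block.values.flatten() for block in sq_err.blocks()]))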