Skip to content

Commit e3405cb

Browse files
authored
Merge pull request #29 from scipp/lorentz-correction
Lorentz correction
2 parents f719383 + 39175d3 commit e3405cb

File tree

2 files changed

+268
-0
lines changed

2 files changed

+268
-0
lines changed

src/ess/diffraction/powder/correction.py

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
# SPDX-License-Identifier: BSD-3-Clause
22
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
3+
"""Correction algorithms for powder diffraction."""
4+
35
import scipp as sc
46

57

@@ -44,3 +46,67 @@ def merge_calibration(*, into: sc.DataArray, calibration: sc.Dataset) -> sc.Data
4446
)
4547
out.masks['calibration'] = calibration['mask'].data
4648
return out
49+
50+
51+
def apply_lorentz_correction(da: sc.DataArray) -> sc.DataArray:
    """Perform a Lorentz correction for ToF powder diffraction data.

    This function uses this definition:

    .. math::

        L = d^4 \\sin\\theta

    where :math:`d` is d-spacing, :math:`\\theta` is half the scattering angle
    (note the definitions in
    https://scipp.github.io/scippneutron/user-guide/coordinate-transformations.html).

    The Lorentz factor as defined here is suitable for correcting time-of-flight data
    expressed in wavelength or d-spacing.
    It follows the definition used by GSAS-II, see page 140 of
    https://subversion.xray.aps.anl.gov/EXPGUI/gsas/all/GSAS%20Manual.pdf

    Parameters
    ----------
    da:
        Input data with coordinates ``two_theta`` and ``dspacing``.
        Coordinates are looked up on the events first (if binned), then
        on the data array itself.

    Returns
    -------
    :
        ``da`` multiplied by :math:`L`.
        Has the same dtype as ``da``.
    """
    # The implementation is optimized under the assumption that two_theta
    # is small and dspacing and the data are large.
    # Shallow copy: coords/masks are shared with the input, only the data
    # buffer is replaced below, so ``da`` itself is never modified.
    out = _shallow_copy(da)
    dspacing = _event_or_bin_coord(da, 'dspacing')
    two_theta = _event_or_bin_coord(da, 'two_theta')
    theta = 0.5 * two_theta

    # Start from d^4 so the subsequent large-array multiplications can run
    # in place on the output buffer.
    d4 = dspacing.broadcast(sizes=out.sizes) ** 4
    if out.bins is None:
        # Dense data: replace the data with d^4, converted to the input dtype
        # (no copy if the dtype already matches).
        out.data = d4.to(dtype=out.dtype, copy=False)
        out_data = out.data
    else:
        # Binned data: same, but operating on the event buffer.
        out.bins.data = d4.to(dtype=out.bins.dtype, copy=False)
        out_data = out.bins.data
    # sin is evaluated on the (assumed small) theta array; ``out=theta``
    # reuses its buffer instead of allocating a new one.
    out_data *= sc.sin(theta, out=theta)
    # Finally fold in the original data, which was not copied above.
    out_data *= da.data if da.bins is None else da.bins.data
    return out
97+
98+
99+
def _shallow_copy(da: sc.DataArray) -> sc.DataArray:
    """Return a shallow copy whose data can be replaced without affecting ``da``.

    Works around https://github.com/scipp/scipp/issues/2773 by re-packing
    the bins of binned data.
    """
    copied = da.copy(deep=False)
    if da.bins is None:
        return copied
    # Rebuild the binned variable from its constituents so that assigning
    # to ``copied.bins.data`` does not write through to ``da``.
    copied.data = sc.bins(**da.bins.constituents)
    return copied
105+
106+
107+
def _event_or_bin_coord(da: sc.DataArray, name: str) -> sc.Variable:
    """Return the event coordinate ``name`` if present, else the bin coordinate.

    Raises ``KeyError`` if neither exists.
    """
    events = da.bins
    if events is not None and name in events.coords:
        return events.coords[name]
    # Not binned, or no event coord with this name.
    return da.coords[name]

tests/diffraction/powder/corrections_test.py

Lines changed: 202 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,10 @@
33
import numpy as np
44
import pytest
55
import scipp as sc
6+
import scipp.testing
67

78
from ess.diffraction.powder import merge_calibration
9+
from ess.diffraction.powder.correction import apply_lorentz_correction
810

911

1012
@pytest.fixture
@@ -109,3 +111,203 @@ def test_merge_calibration_raises_if_mask_exists(calibration):
109111
)
110112
with pytest.raises(ValueError):
111113
merge_calibration(into=da, calibration=calibration)
114+
115+
116+
@pytest.mark.parametrize('data_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('dspacing_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('two_theta_dtype', ('float32', 'float64'))
def test_apply_lorentz_correction_dense_1d_coords(
    data_dtype, dspacing_dtype, two_theta_dtype
):
    # Renamed from ``test_lorentz_correction_dense_1d_coords`` for consistency
    # with the other ``test_apply_lorentz_correction_*`` tests in this file.
    da = sc.DataArray(
        sc.full(
            value=2.1,
            sizes={'detector_number': 3, 'dspacing': 4},
            unit='counts',
            dtype=data_dtype,
        ),
        coords={
            'dspacing': sc.array(
                dims=['dspacing'],
                values=[0.1, 0.4, 0.7, 1.1],
                unit='angstrom',
                dtype=dspacing_dtype,
            ),
            'two_theta': sc.array(
                dims=['detector_number'],
                values=[0.8, 0.9, 1.3],
                unit='rad',
                dtype=two_theta_dtype,
            ),
            'detector_number': sc.array(
                dims=['detector_number'], values=[0, 1, 2], unit=None
            ),
        },
    )
    original = da.copy(deep=True)
    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == {'detector_number': 3, 'dspacing': 4}
    assert corrected.unit == 'angstrom**4 * counts'
    assert corrected.dtype == original.dtype
    assert not corrected.variances
    assert not corrected.bins

    d = original.coords['dspacing'].broadcast(sizes=corrected.sizes).values
    two_theta = original.coords['two_theta'].broadcast(sizes=corrected.sizes).values
    # Any float32 input limits the achievable precision of the result.
    if any(dt == 'float32' for dt in (data_dtype, dspacing_dtype, two_theta_dtype)):
        rtol = 1e-6
    else:
        rtol = 1e-15
    np.testing.assert_allclose(
        corrected.data.values, 2.1 * d**4 * np.sin(two_theta / 2), rtol=rtol
    )

    # Coords of both input and output must be untouched by the correction.
    assert set(corrected.coords.keys()) == {'two_theta', 'dspacing', 'detector_number'}
    for key, coord in corrected.coords.items():
        sc.testing.assert_identical(coord, original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
170+
171+
172+
def test_apply_lorentz_correction_dense_2d_coord():
    """The correction also handles a dense 2-D d-spacing coordinate."""
    sizes = {'detector_number': 3, 'dspacing': 4}
    da = sc.DataArray(
        sc.full(value=0.7, sizes=sizes),
        coords={
            'dspacing': sc.array(
                dims=['dspacing'], values=[0.1, 0.4, 0.7, 1.1], unit='angstrom'
            ).broadcast(sizes=sizes),
            'two_theta': sc.array(
                dims=['detector_number'], values=[0.8, 0.9, 1.3], unit='rad'
            ),
            'detector_number': sc.array(
                dims=['detector_number'], values=[0, 1, 2], unit=None
            ),
        },
    )
    reference = da.copy(deep=True)
    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == sizes
    assert corrected.unit == 'angstrom**4'
    assert corrected.dtype == reference.dtype
    assert not corrected.variances
    assert not corrected.bins

    dspacing = reference.coords['dspacing'].values
    two_theta = reference.coords['two_theta'].broadcast(sizes=corrected.sizes).values
    expected = 0.7 * dspacing**4 * np.sin(two_theta / 2)
    np.testing.assert_allclose(corrected.data.values, expected)

    # Neither the input's nor the output's coords may have been modified.
    assert set(corrected.coords.keys()) == {'two_theta', 'dspacing', 'detector_number'}
    for name in corrected.coords:
        sc.testing.assert_identical(corrected.coords[name], reference.coords[name])
        sc.testing.assert_identical(da.coords[name], reference.coords[name])
206+
207+
208+
@pytest.mark.parametrize('data_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('dspacing_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('two_theta_dtype', ('float32', 'float64'))
def test_apply_lorentz_correction_event_coords(
    data_dtype, dspacing_dtype, two_theta_dtype
):
    # Build binned (event) data: dspacing lives on the events,
    # two_theta on the bins.
    buffer = sc.DataArray(
        sc.full(value=1.5, sizes={'event': 6}, unit='counts', dtype=data_dtype),
        coords={
            'detector_number': sc.array(dims=['event'], values=[0, 3, 2, 2, 0, 4]),
            'dspacing': sc.array(
                dims=['event'],
                values=[0.1, 0.4, 0.2, 1.0, 1.3, 0.7],
                unit='angstrom',
                dtype=dspacing_dtype,
            ),
        },
    )
    da = buffer.group('detector_number').bin(dspacing=2)
    da.coords['two_theta'] = sc.array(
        dims=['detector_number'],
        values=[0.4, 1.2, 1.5, 1.6],
        unit='rad',
        dtype=two_theta_dtype,
    )
    original = da.copy(deep=True)
    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == {'detector_number': 4, 'dspacing': 2}
    assert corrected.bins.unit == 'angstrom**4 * counts'
    # The correction must preserve the data dtype.
    assert corrected.bins.dtype == data_dtype

    # Expected result computed independently with scipp, broadcasting the
    # bin coord two_theta onto the events.
    d = original.bins.coords['dspacing']
    two_theta = sc.bins_like(original, original.coords['two_theta'])
    expected = (1.5 * d**4 * sc.sin(two_theta / 2)).to(dtype=data_dtype)
    # Any float32 input limits the achievable precision of the result.
    if any(dt == 'float32' for dt in (data_dtype, dspacing_dtype, two_theta_dtype)):
        rtol = 1e-6
    else:
        rtol = 1e-15
    # Compare events in a single concatenated bin to avoid per-bin loops.
    np.testing.assert_allclose(
        corrected.bins.concat().value.values,
        expected.bins.concat().value.values,
        rtol=rtol,
    )

    # Bin coords of both input and output must be untouched.
    assert set(corrected.coords.keys()) == {'detector_number', 'two_theta', 'dspacing'}
    for key, coord in corrected.coords.items():
        sc.testing.assert_identical(coord, original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
    # The event coord must also be untouched.
    sc.testing.assert_identical(
        corrected.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
    sc.testing.assert_identical(
        da.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
263+
264+
265+
def test_apply_lorentz_correction_favors_event_coords():
    """The event coord takes precedence over a bin coord of the same name."""
    buffer = sc.DataArray(
        sc.full(value=1.5, sizes={'event': 6}, unit='counts'),
        coords={
            'detector_number': sc.array(dims=['event'], values=[0, 3, 2, 2, 0, 4]),
            'dspacing': sc.array(
                dims=['event'],
                values=[0.1, 0.4, 0.2, 1.0, 1.3, 0.7],
                unit='angstrom',
            ),
        },
    )
    da = buffer.group('detector_number').bin(dspacing=2)
    da.coords['two_theta'] = sc.array(
        dims=['detector_number'],
        values=[0.4, 1.2, 1.5, 1.6],
        unit='rad',
    )
    da.coords['dspacing'][-1] = 10.0  # this should not affect the correction
    # Snapshot AFTER the modification so we can detect changes made by the
    # correction itself.
    original = da.copy(deep=True)
    corrected = apply_lorentz_correction(da)

    d = original.bins.coords['dspacing']  # event-coord, not the modified bin-coord
    two_theta = sc.bins_like(original, original.coords['two_theta'])
    expected = 1.5 * d**4 * sc.sin(two_theta / 2)
    np.testing.assert_allclose(
        corrected.bins.concat().value.values,
        expected.bins.concat().value.values,
        rtol=1e-15,
    )

    # Coords of both input and output must be untouched.
    # (The previous version compared ``da.coords[key]`` with itself, which
    # can never fail; compare against the pre-correction snapshot instead.)
    for key, coord in corrected.coords.items():
        sc.testing.assert_identical(coord, original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
    sc.testing.assert_identical(
        corrected.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
    sc.testing.assert_identical(
        da.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
301+
302+
303+
def test_apply_lorentz_correction_needs_coords():
    """A KeyError is raised when required coordinates are missing."""
    detector_number = sc.array(dims=['detector_number'], values=[0, 1, 2], unit=None)
    da = sc.DataArray(
        sc.ones(sizes={'detector_number': 3, 'dspacing': 4}),
        coords={'detector_number': detector_number},
    )
    # Neither 'dspacing' nor 'two_theta' is present.
    with pytest.raises(KeyError):
        apply_lorentz_correction(da)

0 commit comments

Comments
 (0)