|
3 | 3 | import numpy as np
|
4 | 4 | import pytest
|
5 | 5 | import scipp as sc
|
| 6 | +import scipp.testing |
6 | 7 |
|
7 | 8 | from ess.diffraction.powder import merge_calibration
|
| 9 | +from ess.diffraction.powder.correction import apply_lorentz_correction |
8 | 10 |
|
9 | 11 |
|
10 | 12 | @pytest.fixture
|
@@ -109,3 +111,203 @@ def test_merge_calibration_raises_if_mask_exists(calibration):
|
109 | 111 | )
|
110 | 112 | with pytest.raises(ValueError):
|
111 | 113 | merge_calibration(into=da, calibration=calibration)
|
| 114 | + |
| 115 | + |
@pytest.mark.parametrize('data_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('dspacing_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('two_theta_dtype', ('float32', 'float64'))
def test_lorentz_correction_dense_1d_coords(
    data_dtype, dspacing_dtype, two_theta_dtype
):
    """Dense data with 1-d coords is scaled by d^4 * sin(two_theta / 2)."""
    dspacing = sc.array(
        dims=['dspacing'],
        values=[0.1, 0.4, 0.7, 1.1],
        unit='angstrom',
        dtype=dspacing_dtype,
    )
    two_theta = sc.array(
        dims=['detector_number'],
        values=[0.8, 0.9, 1.3],
        unit='rad',
        dtype=two_theta_dtype,
    )
    detector_number = sc.array(
        dims=['detector_number'], values=[0, 1, 2], unit=None
    )
    da = sc.DataArray(
        sc.full(
            value=2.1,
            sizes={'detector_number': 3, 'dspacing': 4},
            unit='counts',
            dtype=data_dtype,
        ),
        coords={
            'dspacing': dspacing,
            'two_theta': two_theta,
            'detector_number': detector_number,
        },
    )
    original = da.copy(deep=True)

    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == {'detector_number': 3, 'dspacing': 4}
    assert corrected.unit == 'angstrom**4 * counts'
    assert corrected.dtype == original.dtype
    assert not corrected.variances
    assert not corrected.bins

    # NumPy reference computation on coords broadcast to the output shape.
    d_vals = original.coords['dspacing'].broadcast(sizes=corrected.sizes).values
    theta_vals = original.coords['two_theta'].broadcast(sizes=corrected.sizes).values
    # Loosen the tolerance when any input is single precision.
    all_double = 'float32' not in (data_dtype, dspacing_dtype, two_theta_dtype)
    np.testing.assert_allclose(
        corrected.data.values,
        2.1 * d_vals**4 * np.sin(theta_vals / 2),
        rtol=1e-15 if all_double else 1e-6,
    )

    # Coordinates are carried over unchanged and the input is left intact.
    assert set(corrected.coords.keys()) == {'two_theta', 'dspacing', 'detector_number'}
    for key in corrected.coords:
        sc.testing.assert_identical(corrected.coords[key], original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
| 170 | + |
| 171 | + |
def test_apply_lorentz_correction_dense_2d_coord():
    """The correction also works when 'dspacing' is a 2-d coordinate."""
    sizes = {'detector_number': 3, 'dspacing': 4}
    da = sc.DataArray(
        sc.full(value=0.7, sizes=sizes),
        coords={
            'dspacing': sc.array(
                dims=['dspacing'], values=[0.1, 0.4, 0.7, 1.1], unit='angstrom'
            ).broadcast(sizes=sizes),
            'two_theta': sc.array(
                dims=['detector_number'], values=[0.8, 0.9, 1.3], unit='rad'
            ),
            'detector_number': sc.array(
                dims=['detector_number'], values=[0, 1, 2], unit=None
            ),
        },
    )
    original = da.copy(deep=True)

    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == {'detector_number': 3, 'dspacing': 4}
    assert corrected.unit == 'angstrom**4'
    assert corrected.dtype == original.dtype
    assert not corrected.variances
    assert not corrected.bins

    # The 2-d dspacing coord already has the output shape; only two_theta
    # needs broadcasting for the NumPy reference computation.
    d_vals = original.coords['dspacing'].values
    theta_vals = original.coords['two_theta'].broadcast(sizes=corrected.sizes).values
    np.testing.assert_allclose(
        corrected.data.values, 0.7 * d_vals**4 * np.sin(theta_vals / 2)
    )

    # Coordinates are carried over unchanged and the input is left intact.
    assert set(corrected.coords.keys()) == {'two_theta', 'dspacing', 'detector_number'}
    for key in corrected.coords:
        sc.testing.assert_identical(corrected.coords[key], original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
| 206 | + |
| 207 | + |
@pytest.mark.parametrize('data_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('dspacing_dtype', ('float32', 'float64'))
@pytest.mark.parametrize('two_theta_dtype', ('float32', 'float64'))
def test_apply_lorentz_correction_event_coords(
    data_dtype, dspacing_dtype, two_theta_dtype
):
    """Binned data is corrected per event using the event 'dspacing' coord."""
    events = sc.DataArray(
        sc.full(value=1.5, sizes={'event': 6}, unit='counts', dtype=data_dtype),
        coords={
            'detector_number': sc.array(dims=['event'], values=[0, 3, 2, 2, 0, 4]),
            'dspacing': sc.array(
                dims=['event'],
                values=[0.1, 0.4, 0.2, 1.0, 1.3, 0.7],
                unit='angstrom',
                dtype=dspacing_dtype,
            ),
        },
    )
    da = events.group('detector_number').bin(dspacing=2)
    da.coords['two_theta'] = sc.array(
        dims=['detector_number'],
        values=[0.4, 1.2, 1.5, 1.6],
        unit='rad',
        dtype=two_theta_dtype,
    )
    original = da.copy(deep=True)

    corrected = apply_lorentz_correction(da)

    assert corrected.sizes == {'detector_number': 4, 'dspacing': 2}
    assert corrected.bins.unit == 'angstrom**4 * counts'
    assert corrected.bins.dtype == data_dtype

    # Reference: per-event d^4 * sin(two_theta / 2) with two_theta mapped
    # onto the bins; compare the concatenated event lists.
    d = original.bins.coords['dspacing']
    two_theta = sc.bins_like(original, original.coords['two_theta'])
    expected = (1.5 * d**4 * sc.sin(two_theta / 2)).to(dtype=data_dtype)
    # Loosen the tolerance when any input is single precision.
    rtol = (
        1e-6
        if 'float32' in (data_dtype, dspacing_dtype, two_theta_dtype)
        else 1e-15
    )
    np.testing.assert_allclose(
        corrected.bins.concat().value.values,
        expected.bins.concat().value.values,
        rtol=rtol,
    )

    # Bin coords and event coords of the input must be left untouched.
    assert set(corrected.coords.keys()) == {'detector_number', 'two_theta', 'dspacing'}
    for key in corrected.coords:
        sc.testing.assert_identical(corrected.coords[key], original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
    sc.testing.assert_identical(
        corrected.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
    sc.testing.assert_identical(
        da.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
| 263 | + |
| 264 | + |
def test_apply_lorentz_correction_favors_event_coords():
    """When both a bin-coord and an event-coord 'dspacing' exist, the
    correction must use the event-coord and must not modify the input.
    """
    buffer = sc.DataArray(
        sc.full(value=1.5, sizes={'event': 6}, unit='counts'),
        coords={
            'detector_number': sc.array(dims=['event'], values=[0, 3, 2, 2, 0, 4]),
            'dspacing': sc.array(
                dims=['event'],
                values=[0.1, 0.4, 0.2, 1.0, 1.3, 0.7],
                unit='angstrom',
            ),
        },
    )
    da = buffer.group('detector_number').bin(dspacing=2)
    da.coords['two_theta'] = sc.array(
        dims=['detector_number'],
        values=[0.4, 1.2, 1.5, 1.6],
        unit='rad',
    )
    da.coords['dspacing'][-1] = 10.0  # this should not affect the correction
    # Snapshot the input *after* all mutations so we can verify below that
    # the correction does not modify it in place.
    original = da.copy(deep=True)
    corrected = apply_lorentz_correction(da)

    d = original.bins.coords['dspacing']  # event-coord, not the modified bin-coord
    two_theta = sc.bins_like(original, original.coords['two_theta'])
    expected = 1.5 * d**4 * sc.sin(two_theta / 2)
    np.testing.assert_allclose(
        corrected.bins.concat().value.values,
        expected.bins.concat().value.values,
        rtol=1e-15,
    )

    # Bug fix: the previous version compared da.coords[key] with itself,
    # which is vacuously true.  Compare against the pre-correction snapshot
    # instead, matching the sibling tests.
    for key, coord in corrected.coords.items():
        sc.testing.assert_identical(coord, original.coords[key])
        sc.testing.assert_identical(da.coords[key], original.coords[key])
    sc.testing.assert_identical(
        corrected.bins.coords['dspacing'], original.bins.coords['dspacing']
    )
| 301 | + |
| 302 | + |
def test_apply_lorentz_correction_needs_coords():
    """A KeyError is raised when the required coordinates are absent."""
    detector_number = sc.array(
        dims=['detector_number'], values=[0, 1, 2], unit=None
    )
    # Only 'detector_number' is provided; 'dspacing' / 'two_theta' are missing.
    da = sc.DataArray(
        sc.ones(sizes={'detector_number': 3, 'dspacing': 4}),
        coords={'detector_number': detector_number},
    )
    with pytest.raises(KeyError):
        apply_lorentz_correction(da)
0 commit comments