Skip to content

Commit 2030f30

Browse files
committed
low, high -> left, right
1 parent 830095a commit 2030f30

File tree

1 file changed

+10
-10
lines changed

1 file changed

+10
-10
lines changed

src/ess/reduce/time_of_flight/to_events.py

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -45,21 +45,21 @@ def to_events(
4545
edge_sizes = {dim: da.sizes[dim] for dim in edge_dims}
4646
for dim in edge_dims:
4747
coord = da.coords[dim]
48-
low = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
49-
high = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values
48+
left = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
49+
right = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values
5050

5151
# The numpy.random.uniform function below does not support NaNs, so we need to
5252
# replace them with zeros, and then replace them back after the random numbers
5353
# have been generated.
54-
nans = np.isnan(low) | np.isnan(high)
55-
low = np.where(nans, 0.0, low)
56-
high = np.where(nans, 0.0, high)
54+
nans = np.isnan(left) | np.isnan(right)
55+
left = np.where(nans, 0.0, left)
56+
right = np.where(nans, 0.0, right)
5757
# Ensure low <= high
58-
low, high = np.minimum(low, high), np.maximum(low, high)
58+
left, right = np.minimum(left, right), np.maximum(left, right)
5959

6060
# In each bin, we generate a number of events with a uniform distribution.
6161
events = rng.uniform(
62-
low, high, size=(events_per_bin, *list(edge_sizes.values()))
62+
left, right, size=(events_per_bin, *list(edge_sizes.values()))
6363
)
6464
events[..., nans] = np.nan
6565
event_coords[dim] = sc.array(
@@ -79,20 +79,20 @@ def to_events(
7979
data = da.data
8080
if event_masks:
8181
inv_mask = (~reduce(lambda a, b: a | b, event_masks.values())).to(dtype=int)
82-
inv_mask.unit = ''
82+
inv_mask.unit = ""
8383
data = data * inv_mask
8484

8585
# Create the data counts, which are the original counts divided by the number of
8686
# events per bin
8787
sizes = {event_dim: events_per_bin} | da.sizes
8888
val = sc.broadcast(sc.values(data) / float(events_per_bin), sizes=sizes)
89-
kwargs = {'dims': sizes.keys(), 'values': val.values, 'unit': data.unit}
89+
kwargs = {"dims": sizes.keys(), "values": val.values, "unit": data.unit}
9090
if data.variances is not None:
9191
# Note here that all the events are correlated.
9292
# If we later histogram the events with different edges than the original
9393
# histogram, then neighboring bins will be correlated, and the error obtained
9494
# will be too small. It is however not clear what can be done to improve this.
95-
kwargs['variances'] = sc.broadcast(
95+
kwargs["variances"] = sc.broadcast(
9696
sc.variances(data) / float(events_per_bin), sizes=sizes
9797
).values
9898
new_data = sc.array(**kwargs)

0 commit comments

Comments (0)