18 changes: 18 additions & 0 deletions centrosome/_np_compat.py
@@ -0,0 +1,18 @@
import numpy as np

# Check NumPy version once
NP_MAJOR_VERSION = int(np.__version__.split(".")[0])

# Set up aliases based on version
if NP_MAJOR_VERSION >= 2:
    # NumPy 2.x aliases
    np_product = np.prod
    np_cumproduct = np.cumprod
    np_NaN = np.nan
    np_Inf = np.inf
else:
    # NumPy 1.x aliases
    np_product = np.product
    np_cumproduct = np.cumproduct
    np_NaN = np.NaN
    np_Inf = np.Inf
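`np.product`, `np.cumproduct`, `np.NaN`, and `np.Inf` were all removed in NumPy 2.0, which is what the shim above papers over. A minimal sketch of how the rest of this diff consumes the aliases (the shape here is illustrative):

```python
import numpy as np
from centrosome._np_compat import np_product, np_NaN

# np_product resolves to np.prod on NumPy 2.x and np.product on 1.x,
# so pixel counts keep working across both major versions.
n_pixels = np_product((512, 512))  # 262144

# np_NaN resolves to np.nan on 2.x and np.NaN on 1.x; either way,
# np.isnan() is the portable check for it.
assert np.isnan(np_NaN)
```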
12 changes: 2 additions & 10 deletions centrosome/bg_compensate.py
@@ -406,10 +406,7 @@ def bg_compensate(img, sigma, splinepoints, scale):
"""Reads file, subtracts background. Returns [compensated image, background]."""

from PIL import Image
-import pylab
-from matplotlib.image import pil_to_array
from centrosome.filter import canny
-import matplotlib

img = Image.open(img)
if img.mode == "I;16":
@@ -432,10 +429,7 @@ def bg_compensate(img, sigma, splinepoints, scale):
else:
img = new_img.astype(np.float32) / 65535.0
else:
-img = pil_to_array(img)
-
-pylab.subplot(1, 3, 1).imshow(img, cmap=matplotlib.cm.Greys_r)
-pylab.show()
+img = np.array(img)

if len(img.shape) > 2:
raise ValueError("Image must be grayscale")
@@ -457,6 +451,4 @@ def bg_compensate(img, sigma, splinepoints, scale):
print("Executed in %f sec" % (time.process_time() - t0))
bg[~mask] = img[~mask]

-pylab.subplot(1, 3, 2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
-pylab.subplot(1, 3, 3).imshow(bg, cmap=matplotlib.cm.Greys_r)
-pylab.show()
return img - bg, bg
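`matplotlib.image.pil_to_array` is essentially `numpy.asarray` applied to a PIL image, so `np.array(img)` is an equivalent conversion that drops the matplotlib dependency along with the debug plotting. A minimal sketch of the substitution:

```python
import numpy as np
from PIL import Image

# A PIL image converts to an ndarray without matplotlib's helper:
img = Image.new("L", (4, 4))  # small grayscale test image
arr = np.array(img)           # replaces pil_to_array(img)
assert arr.shape == (4, 4) and arr.dtype == np.uint8
```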
18 changes: 11 additions & 7 deletions centrosome/cpmorphology.py
@@ -24,6 +24,8 @@
pass
from . import _convex_hull

+from centrosome._np_compat import np_Inf, np_product, np_NaN

logger = logging.getLogger(__name__)
"""A structuring element for eight-connecting a neigborhood"""
eight_connect = scind.generate_binary_structure(2, 2)
@@ -165,6 +167,8 @@ def adjacent(labels):
a different label.

"""
+# upcast for numpy 2 compatibility
+labels = labels.astype(np.int32, copy=False)
high = labels.max() + 1
if high > np.iinfo(labels.dtype).max:
labels = labels.astype(np.int32)
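The early upcast matters under NumPy 2's NEP 50 promotion rules: `labels.max() + 1` now stays in the array's own dtype, so for a small integer label matrix the sum wraps around instead of promoting, and the overflow guard below it can never fire. A sketch of the failure mode (the uint8 input is illustrative):

```python
import numpy as np

labels = np.array([[0, 255]], dtype=np.uint8)

# NumPy 1.x: np.uint8(255) + 1 promotes to a wider integer -> 256.
# NumPy 2.x (NEP 50): the result stays uint8 and wraps to 0 (with an
# overflow warning), so a `high > np.iinfo(labels.dtype).max` check
# passes silently. Upcasting first, as the patch does, avoids the wrap:
labels = labels.astype(np.int32, copy=False)
assert labels.max() + 1 == 256
```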
@@ -1383,7 +1387,7 @@ def median_of_labels(image, labels, indices):
labels = anti_indices[labels[include]]
image = image[include]
if len(labels) == 0:
-return np.array([np.nan] * len(indices))
+return np.array([np_NaN] * len(indices))
index = np.lexsort((image, labels))
labels, image = labels[index], image[index]
counts = np.bincount(labels)
Expand All @@ -1397,7 +1401,7 @@ def median_of_labels(image, labels, indices):
median[counts > 0] = image[middle_low[counts > 0]]
median[evens] += image[middle_low[evens] + 1]
median[evens] /= 2
-median[counts == 0] = np.nan
+median[counts == 0] = np_NaN
return median


@@ -1477,7 +1481,7 @@ def minimum_enclosing_circle(labels, indexes=None, hull_and_point_count=None):
#
# Start out by eliminating the degenerate cases: 0, 1 and 2
#
-centers[point_count == 0, :] = np.NaN
+centers[point_count == 0, :] = np_NaN
if np.all(point_count == 0):
# Bail if there are no points in any hull to prevent
# index failures below.
@@ -1813,7 +1817,7 @@ def associate_by_distance(labels_a, labels_b, distance):
ab_consider = (ab_distance_minus_radii <= distance) & (~ab_easy_wins)
ij_consider = np.dstack((i[ab_consider], j[ab_consider]))
ij_consider.shape = ij_consider.shape[1:]
-if np.product(ij_consider.shape) == 0:
+if np_product(ij_consider.shape) == 0:
return ij_wins
if True:
wins = []
@@ -2941,7 +2945,7 @@ def block(shape, block_shape):
i = (i * multiplier[0]).astype(int)
j = (j * multiplier[1]).astype(int)
labels = i * ijmax[1] + j
-indexes = np.array(list(range(np.product(ijmax))))
+indexes = np.array(list(range(np_product(ijmax))))
return labels, indexes


@@ -4197,7 +4201,7 @@ def skeletonize(image, mask=None, ordering=None):
# of skeletons
#
np.random.seed(0)
-tiebreaker = np.random.permutation(np.arange(np.product(masked_image.shape)))
+tiebreaker = np.random.permutation(np.arange(np_product(masked_image.shape)))
tiebreaker.shape = masked_image.shape
order = np.lexsort((tiebreaker[masked_image], corner_score[masked_image], distance))
order = np.ascontiguousarray(order, np.int32)
@@ -4429,7 +4433,7 @@ def regional_maximum(image, mask=None, structure=None, ties_are_ok=False):
labels, label_count = scind.label(result, eight_connect)
np.random.seed(0)
ro_distance = rank_order(distance)[0].astype(float)
-count = np.product(ro_distance.shape)
+count = np_product(ro_distance.shape)
ro_distance.flat += np.random.permutation(count).astype(float) / float(count)
positions = scind.maximum_position(
ro_distance, labels, np.arange(label_count) + 1
5 changes: 3 additions & 2 deletions centrosome/filter.py
@@ -25,6 +25,7 @@
grey_reconstruction,
)
from six.moves import range
+from centrosome._np_compat import np_product, np_NaN

"""# of points handled in the first pass of the convex hull code"""
CONVEX_HULL_CHUNKSIZE = 250000
@@ -39,7 +40,7 @@ def stretch(image, mask=None):
returns the stretched image
"""
image = np.array(image, float)
-if np.product(image.shape) == 0:
+if np_product(image.shape) == 0:
return image
if mask is None:
minval = np.min(image)
@@ -1898,7 +1899,7 @@ def hessian(
#
# Calculate for d01 != 0
#
-v = np.ones((image.shape[0], image.shape[1], 2, 2)) * np.nan
+v = np.ones((image.shape[0], image.shape[1], 2, 2)) * np_NaN
v[:, :, :, 0] = L - hessian[:, :, 1, 1, np.newaxis]
v[:, :, :, 1] = hessian[:, :, 0, 1, np.newaxis]
#
11 changes: 6 additions & 5 deletions centrosome/otsu.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
from __future__ import division
import numpy as np
+from centrosome._np_compat import np_Inf, np_NaN


def otsu(data, min_threshold=None, max_threshold=None, bins=256):
@@ -167,7 +168,7 @@ def otsu3(data, min_threshold=None, max_threshold=None, bins=128):
mean = (cs[j] - cs[i]) / diff
mean2 = (cs2[j] - cs2[i]) / diff
score_middle = w * (mean2 - mean ** 2)
-score_middle[i >= j] = np.Inf
+score_middle[i >= j] = np_Inf
score = (
score_low[i * bins // len(data)]
+ score_middle
@@ -217,7 +218,7 @@ def entropy3(data, bins=128):
mean = (cs[j] - cs[i]) / diff
mean2 = (cs2[j] - cs2[i]) / diff
score_middle = entropy_score(mean2 - mean ** 2 + 1.0 / 512.0, bins, w, False)
-score_middle[(i >= j) | np.isnan(score_middle)] = np.Inf
+score_middle[(i >= j) | np.isnan(score_middle)] = np_Inf
score = score_low[i // bin_len] + score_middle + score_high[j // bin_len]
best_score = np.min(score)
best_i_j = np.argwhere(score == best_score)
@@ -235,13 +236,13 @@ def entropy_score(var, bins, w=None, decimate=True):
n = len(var)
var = var[0 : n : n // bins]
score = w * np.log(var * w * np.sqrt(2 * np.pi * np.exp(1)))
-score[np.isnan(score)] = np.Inf
+score[np.isnan(score)] = np_Inf
return score


def weighted_variance(cs, cs2, lo, hi):
if hi == lo:
-return np.Infinity
+return np_Inf
w = (hi - lo) / float(len(cs))
mean = (cs[hi] - cs[lo]) / (hi - lo)
mean2 = (cs2[hi] - cs2[lo]) / (hi - lo)
@@ -250,7 +251,7 @@ def otsu_entropy(cs, cs2, lo, hi):

def otsu_entropy(cs, cs2, lo, hi):
if hi == lo:
-return np.Infinity
+return np_Inf
w = (hi - lo) / float(len(cs))
mean = (cs[hi] - cs[lo]) / (hi - lo)
mean2 = (cs2[hi] - cs2[lo]) / (hi - lo)
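`np.Inf` and `np.Infinity` were both aliases of `np.inf` that NumPy 2.0 removed, so routing every use through the single `np_Inf` name fixes both spellings at once. A sketch of the sentinel pattern these threshold scorers rely on, with an illustrative 2x2 score matrix:

```python
import numpy as np
from centrosome._np_compat import np_Inf

# Invalid threshold pairs (i >= j) are masked with infinity so that
# np.min() can never select them as the best score.
score = np.array([[0.5, 0.2], [0.1, 0.3]])
i, j = np.mgrid[0:2, 0:2]
score[i >= j] = np_Inf
assert np.min(score) == 0.2
```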
31 changes: 16 additions & 15 deletions centrosome/threshold.py
@@ -15,6 +15,7 @@
from .filter import stretch, unstretch
from six.moves import range
from six.moves import zip
+from centrosome._np_compat import np_product, np_Inf

TM_OTSU = "Otsu"
TM_OTSU_GLOBAL = "Otsu Global"
@@ -374,7 +375,7 @@ def get_mog_threshold(image, mask=None, object_fraction=0.2):
the user.
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
-pixel_count = np.product(cropped_image.shape)
+pixel_count = np_product(cropped_image.shape)
max_count = 512 ** 2 # maximum # of pixels analyzed
#
# We need at least 3 pixels to keep from crashing because the highest
@@ -401,9 +402,9 @@ def get_mog_threshold(image, mask=None, object_fraction=0.2):
# in case there are any quantization effects that have resulted in
# unnaturally many 0:s or 1:s in the image.
cropped_image.sort()
-one_percent = (np.product(cropped_image.shape) + 99) // 100
+one_percent = (np_product(cropped_image.shape) + 99) // 100
cropped_image = cropped_image[one_percent:-one_percent]
-pixel_count = np.product(cropped_image.shape)
+pixel_count = np_product(cropped_image.shape)
# Guess at the class means for the 3 classes: background,
# in-between and object
bg_pixel = cropped_image[int(round(pixel_count * background_fraction / 2.0))]
@@ -420,7 +421,7 @@ def get_mog_threshold(image, mask=None, object_fraction=0.2):
# distributions/classes to the data. Note, the code below is general
# and works for any number of classes. Iterate until parameters don't
# change anymore.
-class_count = np.prod(class_mean.shape)
+class_count = np_product(class_mean.shape)
#
# Do a coarse iteration on subsampled data and a fine iteration on the real
# data
@@ -500,7 +501,7 @@ def get_background_threshold(image, mask=None):
2 (an arbitrary empirical factor). The user will presumably adjust the
multiplication factor as needed."""
cropped_image = np.array(image.flat) if mask is None else image[mask]
-if np.product(cropped_image.shape) == 0:
+if np_product(cropped_image.shape) == 0:
return 0
img_min = np.min(cropped_image)
img_max = np.max(cropped_image)
@@ -566,7 +567,7 @@ def get_robust_background_threshold(
"""

cropped_image = np.array(image.flat) if mask is None else image[mask]
-n_pixels = np.product(cropped_image.shape)
+n_pixels = np_product(cropped_image.shape)
if n_pixels < 3:
return 0

@@ -593,7 +594,7 @@ def mad(a):

returns the median of the deviation of a from its median.
"""
-a = np.asfarray(a).flatten()
+a = np.asarray(a, dtype=float).flatten()
return np.median(np.abs(a - np.median(a)))
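`np.asfarray` was removed in NumPy 2.0; `np.asarray(a, dtype=float)` is the drop-in replacement and behaves the same on the inputs `mad` receives. A quick check of the rewritten function (the sample values are illustrative):

```python
import numpy as np

def mad(a):
    """Median absolute deviation: median of |a - median(a)|."""
    a = np.asarray(a, dtype=float).flatten()
    return np.median(np.abs(a - np.median(a)))

# median = 2, |a - 2| = [1, 1, 0, 0, 2, 4, 7], median of that = 1
assert mad([1, 1, 2, 2, 4, 6, 9]) == 1.0
```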


@@ -625,7 +626,7 @@ def get_ridler_calvard_threshold(image, mask=None):
Cybernetics, vol. 8, no. 8, August 1978.
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
-if np.product(cropped_image.shape) < 3:
+if np_product(cropped_image.shape) < 3:
return 0
if np.min(cropped_image) == np.max(cropped_image):
return cropped_image[0]
@@ -661,7 +662,7 @@ def get_ridler_calvard_threshold(image, mask=None):
def get_kapur_threshold(image, mask=None):
"""The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space."""
cropped_image = np.array(image.flat) if mask is None else image[mask]
-if np.product(cropped_image.shape) < 3:
+if np_product(cropped_image.shape) < 3:
return 0
if np.min(cropped_image) == np.max(cropped_image):
return cropped_image[0]
@@ -678,7 +679,7 @@ def get_kapur_threshold(image, mask=None):
histogram = histogram[keep]
histogram_values = histogram_values[keep]
# check for corner cases
-if np.product(histogram_values) == 1:
+if np_product(histogram_values) == 1:
return 2 ** histogram_values[0]
# Normalize to probabilities
p = histogram.astype(float) / float(np.sum(histogram))
@@ -693,7 +694,7 @@ def get_kapur_threshold(image, mask=None):
hi_entropy = hi_e / hi_sum - np.log2(hi_sum)

sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]
-sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf
+sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np_Inf
entry = np.argmin(sum_entropy)
return 2 ** ((histogram_values[entry] + histogram_values[entry + 1]) / 2)

@@ -785,8 +786,8 @@ def weighted_variance(image, mask, binary_image):

fg = np.log2(np.maximum(image[binary_image & mask], minval))
bg = np.log2(np.maximum(image[(~binary_image) & mask], minval))
-nfg = np.product(fg.shape)
-nbg = np.product(bg.shape)
+nfg = np_product(fg.shape)
+nbg = np_product(bg.shape)
if nfg == 0:
return np.var(bg)
elif nbg == 0:
@@ -845,9 +846,9 @@ def sum_of_entropies(image, mask, binary_image):
#
hfg = hfg[hfg > 0]
hbg = hbg[hbg > 0]
-if np.product(hfg.shape) == 0:
+if np_product(hfg.shape) == 0:
hfg = np.ones((1,), int)
-if np.product(hbg.shape) == 0:
+if np_product(hbg.shape) == 0:
hbg = np.ones((1,), int)
#
# Normalize
3 changes: 2 additions & 1 deletion centrosome/zernike.py
@@ -8,6 +8,7 @@
from .cpmorphology import fill_labeled_holes, draw_line
from six.moves import range
from six.moves import zip
+from centrosome._np_compat import np_cumproduct


def construct_zernike_lookuptable(zernike_indexes):
@@ -19,7 +20,7 @@ def construct_zernike_lookuptable(zernike_indexes):
"""
n_max = np.max(zernike_indexes[:, 0])
factorial = np.ones((1 + n_max,), dtype=float)
-factorial[1:] = np.cumproduct(np.arange(1, 1 + n_max, dtype=float))
+factorial[1:] = np_cumproduct(np.arange(1, 1 + n_max, dtype=float))
width = int(n_max // 2 + 1)
lut = np.zeros((zernike_indexes.shape[0], width), dtype=float)
for idx, (n, m) in enumerate(zernike_indexes):
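The factorial table is built with a cumulative product, so the alias swap is behavior-preserving; a quick check, assuming n_max = 5:

```python
import numpy as np
from centrosome._np_compat import np_cumproduct

n_max = 5
factorial = np.ones((1 + n_max,), dtype=float)
factorial[1:] = np_cumproduct(np.arange(1, 1 + n_max, dtype=float))
# factorial holds [0!, 1!, 2!, 3!, 4!, 5!] == [1, 1, 2, 6, 24, 120]
assert factorial[-1] == 120.0
```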
2 changes: 1 addition & 1 deletion pixi.toml
@@ -18,4 +18,4 @@ twine = ">=6.0.1, <7"
[dependencies]
python = "3.9.*"
cython = "<3.0"
numpy = "<2"
numpy = "*"
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,3 +1,3 @@
[build-system]
requires = ["setuptools", "numpy<2"]
requires = ["setuptools", "numpy"]
build-backend = "setuptools.build_meta"
3 changes: 1 addition & 2 deletions setup.py
@@ -108,8 +108,7 @@ def run_tests(self):
},
install_requires=[
"deprecation",
"matplotlib>=3.1.3,<4",
"numpy>=1.18.2,<2",
"numpy>=1.18.2",
"pillow>=7.1.0,<12",
"scikit-image>=0.17.2,<=0.24",
"scipy>=1.4.1,!=1.11.0,<2",