
Handling scalars in tests with NEP 51 (numpy >= 2) #11521

Closed · wants to merge 7 commits
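
Background for the changes below: NEP 51 (adopted in NumPy 2.0) changes how NumPy scalars print, so a doctest that used to see `3.0` now sees `np.float64(3.0)` and fails. The fix applied across these files is to hand back built-in Python types (via `float()`, `int()`, `.item()` or `.tolist()`) wherever a doctest or a type annotation expects one. A minimal sketch of the failure mode, with the expected reprs shown as comments:

```python
import numpy as np

# Under NumPy >= 2 (NEP 51) a NumPy scalar prints with its type wrapper,
# which breaks doctests that compare plain printed values.
rmse = np.sqrt(np.mean((np.array([1, 2, 3]) - np.array([6, 4, 2])) ** 2))
print(repr(rmse))         # NumPy 2.x: np.float64(3.1622776601683795) / NumPy 1.x: 3.1622776601683795
print(repr(float(rmse)))  # 3.1622776601683795 on both, so the doctest keeps passing
```
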
28 changes: 15 additions & 13 deletions computer_vision/haralick_descriptors.py
@@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float
>>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2]))
3.1622776601683795
"""
return np.sqrt(((original - reference) ** 2).mean())
return float(np.sqrt(((original - reference) ** 2).mean()))


def normalize_image(
@@ -298,16 +298,18 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]:

# Sum values for descriptors ranging from the first one to the last,
# as all are their respective origin matrix and not the resulting value yet.
return [
maximum_prob,
correlation.sum(),
energy.sum(),
contrast.sum(),
dissimilarity.sum(),
inverse_difference.sum(),
homogeneity.sum(),
entropy.sum(),
]
return np.array(
[
maximum_prob,
correlation.sum(),
energy.sum(),
contrast.sum(),
dissimilarity.sum(),
inverse_difference.sum(),
homogeneity.sum(),
entropy.sum(),
]
).tolist()


def get_descriptors(
@@ -335,7 +337,7 @@ def get_descriptors(
return np.concatenate(descriptors, axis=None)


def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float:
"""
Simple method for calculating the euclidean distance between two points,
with type np.ndarray.
@@ -346,7 +348,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
>>> euclidean(a, b)
3.3166247903554
"""
return np.sqrt(np.sum(np.square(point_1 - point_2)))
return float(np.sqrt(np.sum(np.square(point_1 - point_2))))


def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]:
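
For `euclidean` above, the annotation also changes from `np.float32` to `float`, which only stays honest if the value is actually converted. A standalone sketch of that pattern (not the exact repo code, and the sample arrays here are made up):

```python
import numpy as np

def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float:
    # np.sqrt(np.sum(...)) yields a NumPy scalar (np.float64, not np.float32);
    # float() makes the return value match the annotation and print plainly.
    return float(np.sqrt(np.sum(np.square(point_1 - point_2))))

print(euclidean(np.array([1, 2, 3]), np.array([2, 3, 4])))  # 1.7320508075688772
```
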
8 changes: 4 additions & 4 deletions data_structures/heap/binomial_heap.py
@@ -273,7 +273,7 @@ def delete_min(self):
# Update min_node
self.min_node = None

return min_value
return int(min_value)
# No right subtree corner case
# The structure of the tree implies that this should be the bottom root
# and there is at least one other root
@@ -292,7 +292,7 @@ def delete_min(self):
if i.val < self.min_node.val:
self.min_node = i
i = i.parent
return min_value
return int(min_value)
# General case
# Find the BinomialHeap of the right subtree of min_node
bottom_of_new = self.min_node.right
@@ -312,7 +312,7 @@ def delete_min(self):
self.bottom_root = bottom_of_new
self.min_node = min_of_new
# print("Single root, multiple nodes case")
return min_value
return int(min_value)
# Remaining cases
# Construct heap of right subtree
new_heap = BinomialHeap(
@@ -354,7 +354,7 @@ def delete_min(self):
# Merge heaps
self.merge_heaps(new_heap)

return min_value
return int(min_value)

def pre_order(self):
"""
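
The binomial heap changes are the integer counterpart: the module's doctests build the heap from NumPy integer arrays, so `min_value` is a NumPy integer and `delete_min()` would print as `np.int64(...)`. A small sketch of the conversion (assuming the stored values originate from a NumPy integer array, as in those doctests):

```python
import numpy as np

values = np.array([17, 0, 100])
min_value = values.min()
print(repr(min_value))       # np.int64(0) under NumPy >= 2
print(repr(int(min_value)))  # 0, the plain int the delete_min() doctests expect
```
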
2 changes: 1 addition & 1 deletion electronics/circular_convolution.py
@@ -91,7 +91,7 @@ def circular_convolution(self) -> list[float]:
final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

# rounding-off to two decimal places
return [round(i, 2) for i in final_signal]
return np.array([round(i, 2) for i in final_signal]).tolist()


if __name__ == "__main__":
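
In `circular_convolution`, each element of `final_signal` is a NumPy scalar, and `round()` on a NumPy scalar returns another NumPy scalar, so the rounded list would still print with type wrappers. Routing the list through `np.array(...).tolist()` converts everything to built-in floats. A sketch with a made-up signal:

```python
import numpy as np

final_signal = np.array([10.0, 10.0, 6.0, 14.0])
rounded = [round(i, 2) for i in final_signal]
print(rounded)                      # [np.float64(10.0), ...] under NumPy >= 2
print(np.array(rounded).tolist())   # [10.0, 10.0, 6.0, 14.0]
```
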
12 changes: 6 additions & 6 deletions fractals/julia_sets.py
@@ -40,11 +40,11 @@
def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
"""
Evaluate $e^z + c$.
>>> eval_exponential(0, 0)
>>> eval_exponential(0, 0).item()
1.0
>>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15
>>> (abs(eval_exponential(1, np.pi*1.j)) < 1e-15).item()
True
>>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
>>> (abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15).item()
True
"""
return np.exp(z_values) + c_parameter
@@ -101,17 +101,17 @@ def iterate_function(
>>> np.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... np.array([0,1,2]))[0])
... np.array([0,1,2]))[0]).item()
0j
>>> np.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... np.array([0,1,2]))[1])
... np.array([0,1,2]))[1]).item()
(1+0j)
>>> np.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... np.array([0,1,2]))[2])
... np.array([0,1,2]))[2]).item()
(256+0j)
"""

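
The Julia-set changes take the other route: instead of converting inside the functions (which intentionally return arrays), the doctests call `.item()` on the result, turning a 0-d array or NumPy scalar into the matching Python object. Roughly:

```python
import numpy as np

value = np.exp(0) + 0                     # what eval_exponential(0, 0) boils down to
print(repr(value))                        # np.float64(1.0) under NumPy >= 2
print(value.item())                       # 1.0
print((abs(value - 1.0) < 1e-15).item())  # True (a plain bool, not np.True_)
```
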
2 changes: 1 addition & 1 deletion graphics/bezier_curve.py
@@ -44,7 +44,7 @@ def basis_function(self, t: float) -> list[float]:
)
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(output_values), 5) == 1
return output_values
return [float(i) for i in output_values]

def bezier_curve_function(self, t: float) -> tuple[float, float]:
"""
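
In `basis_function` only the returned list changes; the `assert round(sum(output_values), 5) == 1` above it still works because NumPy scalars add and compare like floats, it is only their repr that differs. For instance (values made up):

```python
import numpy as np

output_values = [np.float64(0.25), np.float64(0.75)]
assert round(sum(output_values), 5) == 1     # unchanged: equality with ints/floats still holds
print(output_values)                         # [np.float64(0.25), np.float64(0.75)] under NumPy >= 2
print([float(i) for i in output_values])     # [0.25, 0.75], the doctest-friendly form
```
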
4 changes: 2 additions & 2 deletions graphs/dijkstra_binary_grid.py
@@ -69,7 +69,7 @@ def dijkstra(
x, y = predecessors[x, y]
path.append(source) # add the source manually
path.reverse()
return matrix[destination], path
return float(matrix[destination]), path

for i in range(len(dx)):
nx, ny = x + dx[i], y + dy[i]
@@ -80,7 +80,7 @@
matrix[nx, ny] = dist + 1
predecessors[nx, ny] = (x, y)

return np.inf, []
return float(np.inf), []


if __name__ == "__main__":
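
For `dijkstra`, the distance comes from indexing the cost matrix, and indexing a NumPy array always yields a NumPy scalar, hence `float(matrix[destination])`. (`np.inf` itself is already a built-in float, so `float(np.inf)` only keeps the two return statements symmetric.) Sketch:

```python
import numpy as np

matrix = np.full((2, 2), np.inf)
matrix[1, 1] = 4.0
destination = (1, 1)
print(repr(matrix[destination]))         # np.float64(4.0): indexing returns a NumPy scalar
print(repr(float(matrix[destination])))  # 4.0, a plain float for the returned distance
print(type(np.inf) is float)             # True
```
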
2 changes: 1 addition & 1 deletion linear_algebra/src/power_iteration.py
@@ -78,7 +78,7 @@ def power_iteration(
if is_complex:
lambda_ = np.real(lambda_)

return lambda_, vector
return float(lambda_), vector


def test_power_iteration() -> None:
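
In `power_iteration`, the eigenvalue estimate `lambda_` comes from dot products of 1-D vectors, and `np.dot` of two 1-D arrays returns a NumPy scalar rather than an array, so `float(lambda_)` converts it for the returned pair. A rough sketch with toy vectors (not the repo's test matrix):

```python
import numpy as np

vector = np.array([1.0, 0.0])
transformed = np.array([3.0, 4.0])     # stand-in for input_matrix @ vector
lambda_ = np.dot(vector, transformed)  # Rayleigh-quotient-style estimate
print(repr(lambda_))                   # np.float64(3.0)
print(repr(float(lambda_)))            # 3.0
```
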
6 changes: 3 additions & 3 deletions linear_programming/simplex.py
@@ -144,7 +144,7 @@ def find_pivot(self) -> tuple[Any, Any]:
# Arg of minimum quotient excluding the nan values. n_stages is added
# to compensate for earlier exclusion of objective columns
row_idx = np.nanargmin(quotients) + self.n_stages
return row_idx, col_idx
return row_idx.item(), col_idx.item()

def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
"""Pivots on value on the intersection of pivot row and column.
@@ -315,7 +315,7 @@ def interpret_tableau(self) -> dict[str, float]:
{'P': 5.0, 'x1': 1.0, 'x2': 1.0}
"""
# P = RHS of final tableau
output_dict = {"P": abs(self.tableau[0, -1])}
output_dict = {"P": float(abs(self.tableau[0, -1]))}

for i in range(self.n_vars):
# Gives indices of nonzero entries in the ith column
@@ -329,7 +329,7 @@ def interpret_tableau(self) -> dict[str, float]:
# If there is only one nonzero value in column, which is one
if n_nonzero == 1 and nonzero_val == 1:
rhs_val = self.tableau[nonzero_rowidx, -1]
output_dict[self.col_titles[i]] = rhs_val
output_dict[self.col_titles[i]] = float(rhs_val)
return output_dict


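
`find_pivot` is the index-valued case: reductions like `np.nanargmin` return NumPy integers, and adding a Python int offset keeps them NumPy integers, so `.item()` converts them to plain ints for the returned pair. For example:

```python
import numpy as np

quotients = np.array([np.nan, 2.0, 5.0])
row_idx = np.nanargmin(quotients) + 2  # + n_stages-style offset stays a NumPy integer
print(repr(row_idx))                   # np.int64(3)
print(repr(row_idx.item()))            # 3, a plain Python int
```
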
2 changes: 1 addition & 1 deletion machine_learning/decision_tree.py
@@ -40,7 +40,7 @@ def mean_squared_error(self, labels, prediction):
if labels.ndim != 1:
print("Error: Input labels must be one dimensional")

return np.mean((labels - prediction) ** 2)
return np.mean((labels - prediction) ** 2).item()

def train(self, x, y):
"""
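
`mean_squared_error` above uses `.item()` where other files use `float(...)`; for a 0-d floating-point result the two are interchangeable, e.g.:

```python
import numpy as np

labels = np.array([1.0, 2.0, 3.0])
prediction = 2.0
mse = np.mean((labels - prediction) ** 2)
print(mse.item() == float(mse) == 2 / 3)  # True: both spellings give the same built-in float
```
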
8 changes: 4 additions & 4 deletions machine_learning/forecasting/run.py
@@ -34,7 +34,7 @@ def linear_regression_prediction(
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
return float(abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]))


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
@@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) ->
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
return result[0]
return float(result[0])


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
@@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
return y_pred[0]
return float(y_pred[0])


def interquartile_range_checker(train_user: list) -> float:
@@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float:
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
return float(low_lim)


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
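
The forecasting helpers wrap values coming back from statsmodels and scikit-learn; those libraries return NumPy arrays, so the first element is a NumPy scalar and gets converted at the module boundary. A stand-in sketch (the array literal plays the role of `regressor.predict(x_test)`):

```python
import numpy as np

y_pred = np.array([2.0])       # stand-in for a fitted model's predict() output
print(repr(y_pred[0]))         # np.float64(2.0)
print(repr(float(y_pred[0])))  # 2.0, honouring the functions' `-> float` annotations
```
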
2 changes: 1 addition & 1 deletion machine_learning/k_nearest_neighbours.py
@@ -42,7 +42,7 @@ def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
>>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
10.0
"""
return np.linalg.norm(a - b)
return float(np.linalg.norm(a - b))

def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
"""
4 changes: 2 additions & 2 deletions machine_learning/logistic_regression.py
@@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
@returns: returns value in the range 0 to 1

Examples:
>>> sigmoid_function(4)
>>> float(sigmoid_function(4))
0.9820137900379085
>>> sigmoid_function(np.array([-3, 3]))
array([0.04742587, 0.95257413])
@@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
References:
- https://en.wikipedia.org/wiki/Logistic_regression
"""
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())


def log_likelihood(x, y, weights):
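
`sigmoid_function` is annotated `float | np.ndarray` and must keep returning arrays for array input, so the conversion happens in the doctest call (`float(sigmoid_function(4))`) rather than inside the function. Roughly:

```python
import numpy as np

def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))

print(float(sigmoid_function(4)))           # 0.9820137900379085
print(sigmoid_function(np.array([-3, 3])))  # [0.04742587 0.95257413], arrays still work
```
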
30 changes: 15 additions & 15 deletions machine_learning/loss_functions.py
@@ -36,7 +36,7 @@ def binary_cross_entropy(

y_pred = np.clip(y_pred, epsilon, 1 - epsilon) # Clip predictions to avoid log(0)
bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
return np.mean(bce_loss)
return float(np.mean(bce_loss))


def binary_focal_cross_entropy(
@@ -87,7 +87,7 @@ def binary_focal_cross_entropy(
+ (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
)

return np.mean(bcfe_loss)
return float(np.mean(bcfe_loss))


def categorical_cross_entropy(
@@ -145,7 +145,7 @@ def categorical_cross_entropy(
raise ValueError("Predicted probabilities must sum to approximately 1.")

y_pred = np.clip(y_pred, epsilon, 1) # Clip predictions to avoid log(0)
return -np.sum(y_true * np.log(y_pred))
return float(-np.sum(y_true * np.log(y_pred)))


def categorical_focal_cross_entropy(
@@ -247,7 +247,7 @@ def categorical_focal_cross_entropy(
alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1
)

return np.mean(cfce_loss)
return float(np.mean(cfce_loss))


def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -287,7 +287,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
raise ValueError("y_true can have values -1 or 1 only.")

hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
return np.mean(hinge_losses)
return float(np.mean(hinge_losses))


def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:

>>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
>>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102).item()
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
>>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
>>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164).item()
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
>>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028).item()
True
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
>>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16).item()
True
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
>>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16).item()
False
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -433,7 +433,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
raise ValueError("Input arrays must have the same length.")

squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
return np.mean(squared_logarithmic_errors)
return float(np.mean(squared_logarithmic_errors))


def mean_absolute_percentage_error(
@@ -478,7 +478,7 @@ def mean_absolute_percentage_error(
y_true = np.where(y_true == 0, epsilon, y_true)
absolute_percentage_diff = np.abs((y_true - y_pred) / y_true)

return np.mean(absolute_percentage_diff)
return float(np.mean(absolute_percentage_diff))


def perplexity_loss(
@@ -570,7 +570,7 @@ def perplexity_loss(
# Calculating perplexity for each sentence
perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1)))

return np.mean(perp_losses)
return float(np.mean(perp_losses))


def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
@@ -626,7 +626,7 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->

diff = np.abs(y_true - y_pred)
loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
return np.mean(loss)
return float(np.mean(loss))


def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -660,7 +660,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
raise ValueError("Input arrays must have the same length.")

kl_loss = y_true * np.log(y_true / y_pred)
return np.sum(kl_loss)
return float(np.sum(kl_loss))


if __name__ == "__main__":
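
The loss-function doctests that compare with `np.isclose(...)` need the same treatment because `np.isclose` on scalars returns `np.bool_`, which prints as `np.True_` under NumPy 2; `.item()` turns it into a plain `True`:

```python
import numpy as np

close = np.isclose(0.158 * 5, 0.79)  # scalar np.isclose returns np.bool_
print(repr(close))                   # np.True_ under NumPy >= 2 (previously True)
print(repr(close.item()))            # True, so the doctest expectation still matches
```
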