diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py
index 634f0495797b..b58cdcad1481 100644
--- a/computer_vision/haralick_descriptors.py
+++ b/computer_vision/haralick_descriptors.py
@@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float
     >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2]))
     3.1622776601683795
     """
-    return np.sqrt(((original - reference) ** 2).mean())
+    return float(np.sqrt(((original - reference) ** 2).mean()))


 def normalize_image(
@@ -298,16 +298,18 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]:

     # Sum values for descriptors ranging from the first one to the last,
     # as all are their respective origin matrix and not the resulting value yet.
-    return [
-        maximum_prob,
-        correlation.sum(),
-        energy.sum(),
-        contrast.sum(),
-        dissimilarity.sum(),
-        inverse_difference.sum(),
-        homogeneity.sum(),
-        entropy.sum(),
-    ]
+    return np.array(
+        [
+            maximum_prob,
+            correlation.sum(),
+            energy.sum(),
+            contrast.sum(),
+            dissimilarity.sum(),
+            inverse_difference.sum(),
+            homogeneity.sum(),
+            entropy.sum(),
+        ]
+    ).tolist()


 def get_descriptors(
@@ -335,7 +337,7 @@ def get_descriptors(
     return np.concatenate(descriptors, axis=None)


-def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
+def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float:
     """
     Simple method for calculating the euclidean distance between two points,
     with type np.ndarray.
@@ -346,7 +348,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
     >>> euclidean(a, b)
     3.3166247903554
     """
-    return np.sqrt(np.sum(np.square(point_1 - point_2)))
+    return float(np.sqrt(np.sum(np.square(point_1 - point_2))))


 def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]:
diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py
index 099bd2871023..daf35aae0300 100644
--- a/data_structures/heap/binomial_heap.py
+++ b/data_structures/heap/binomial_heap.py
@@ -273,7 +273,7 @@ def delete_min(self):
             # Update min_node
             self.min_node = None

-            return min_value
+            return int(min_value)
         # No right subtree corner case
         # The structure of the tree implies that this should be the bottom root
         # and there is at least one other root
@@ -292,7 +292,7 @@ def delete_min(self):
                 if i.val < self.min_node.val:
                     self.min_node = i
                 i = i.parent
-            return min_value
+            return int(min_value)
         # General case
         # Find the BinomialHeap of the right subtree of min_node
         bottom_of_new = self.min_node.right
@@ -312,7 +312,7 @@ def delete_min(self):
             self.bottom_root = bottom_of_new
             self.min_node = min_of_new
             # print("Single root, multiple nodes case")
-            return min_value
+            return int(min_value)
         # Remaining cases
         # Construct heap of right subtree
         new_heap = BinomialHeap(
@@ -354,7 +354,7 @@ def delete_min(self):
         # Merge heaps
         self.merge_heaps(new_heap)

-        return min_value
+        return int(min_value)

     def pre_order(self):
         """
diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py
index 768f2ad941bc..8f31d41fe35e 100644
--- a/electronics/circular_convolution.py
+++ b/electronics/circular_convolution.py
@@ -91,7 +91,7 @@ def circular_convolution(self) -> list[float]:
         final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

         # rounding-off to two decimal places
-        return [round(i, 2) for i in final_signal]
+        return np.array([round(i, 2) for i in final_signal]).tolist()


 if __name__ == "__main__":
diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py
index 1eef4573ba19..d5b2587c84ea 100644
--- a/fractals/julia_sets.py
+++ b/fractals/julia_sets.py
@@ -40,11 +40,11 @@ def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
     """
     Evaluate $e^z + c$.

-    >>> eval_exponential(0, 0)
+    >>> eval_exponential(0, 0).item()
     1.0
-    >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15
+    >>> (abs(eval_exponential(1, np.pi*1.j)) < 1e-15).item()
     True
-    >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
+    >>> (abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15).item()
     True
     """
     return np.exp(z_values) + c_parameter
@@ -101,17 +101,17 @@ def iterate_function(
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ...                           0,
     ...                           3,
-    ...                           np.array([0,1,2]))[0])
+    ...                           np.array([0,1,2]))[0]).item()
     0j
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ...                           0,
     ...                           3,
-    ...                           np.array([0,1,2]))[1])
+    ...                           np.array([0,1,2]))[1]).item()
     (1+0j)
     >>> np.round(iterate_function(eval_quadratic_polynomial,
     ...                           0,
     ...                           3,
-    ...                           np.array([0,1,2]))[2])
+    ...                           np.array([0,1,2]))[2]).item()
     (256+0j)

     """
diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py
index 9d906f179c92..8a7430c34743 100644
--- a/graphics/bezier_curve.py
+++ b/graphics/bezier_curve.py
@@ -44,7 +44,7 @@ def basis_function(self, t: float) -> list[float]:
         )
         # the basis must sum up to 1 for it to produce a valid Bezier curve.
         assert round(sum(output_values), 5) == 1
-        return output_values
+        return [float(i) for i in output_values]

     def bezier_curve_function(self, t: float) -> tuple[float, float]:
         """
diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py
index c23d8234328a..25363c37e674 100644
--- a/graphs/dijkstra_binary_grid.py
+++ b/graphs/dijkstra_binary_grid.py
@@ -69,7 +69,7 @@ def dijkstra(
                 x, y = predecessors[x, y]
             path.append(source)  # add the source manually
             path.reverse()
-            return matrix[destination], path
+            return float(matrix[destination]), path

         for i in range(len(dx)):
             nx, ny = x + dx[i], y + dy[i]
@@ -80,7 +80,7 @@ def dijkstra(
                 matrix[nx, ny] = dist + 1
                 predecessors[nx, ny] = (x, y)

-    return np.inf, []
+    return float(np.inf), []


 if __name__ == "__main__":
diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py
index 24fbd9a5e002..83c2ce48c3a0 100644
--- a/linear_algebra/src/power_iteration.py
+++ b/linear_algebra/src/power_iteration.py
@@ -78,7 +78,7 @@ def power_iteration(
     if is_complex:
         lambda_ = np.real(lambda_)

-    return lambda_, vector
+    return float(lambda_), vector


 def test_power_iteration() -> None:
diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py
index dc171bacd3a2..fa076adc60c6 100644
--- a/linear_programming/simplex.py
+++ b/linear_programming/simplex.py
@@ -144,7 +144,7 @@ def find_pivot(self) -> tuple[Any, Any]:
         # Arg of minimum quotient excluding the nan values. n_stages is added
         # to compensate for earlier exclusion of objective columns
         row_idx = np.nanargmin(quotients) + self.n_stages
-        return row_idx, col_idx
+        return row_idx.item(), col_idx.item()

     def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
         """Pivots on value on the intersection of pivot row and column.
@@ -315,7 +315,7 @@ def interpret_tableau(self) -> dict[str, float]:
         {'P': 5.0, 'x1': 1.0, 'x2': 1.0}
         """
         # P = RHS of final tableau
-        output_dict = {"P": abs(self.tableau[0, -1])}
+        output_dict = {"P": float(abs(self.tableau[0, -1]))}

         for i in range(self.n_vars):
             # Gives indices of nonzero entries in the ith column
@@ -329,7 +329,7 @@ def interpret_tableau(self) -> dict[str, float]:
             # If there is only one nonzero value in column, which is one
             if n_nonzero == 1 and nonzero_val == 1:
                 rhs_val = self.tableau[nonzero_rowidx, -1]
-                output_dict[self.col_titles[i]] = rhs_val
+                output_dict[self.col_titles[i]] = float(rhs_val)
         return output_dict


diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py
index d0bd6ab0b555..d30fc7e13710 100644
--- a/machine_learning/decision_tree.py
+++ b/machine_learning/decision_tree.py
@@ -40,7 +40,7 @@ def mean_squared_error(self, labels, prediction):
         if labels.ndim != 1:
             print("Error: Input labels must be one dimensional")

-        return np.mean((labels - prediction) ** 2)
+        return np.mean((labels - prediction) ** 2).item()

     def train(self, x, y):
         """
diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py
index dbb86caf8568..18774cc7e5b3 100644
--- a/machine_learning/forecasting/run.py
+++ b/machine_learning/forecasting/run.py
@@ -34,7 +34,7 @@ def linear_regression_prediction(
     x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
     y = np.array(train_usr)
     beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
-    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
+    return float(abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]))


 def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
@@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) ->
     )
     model_fit = model.fit(disp=False, maxiter=600, method="nm")
     result = model_fit.predict(1, len(test_match), exog=[test_match])
-    return result[0]
+    return float(result[0])


 def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
@@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f
     regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
     regressor.fit(x_train, train_user)
     y_pred = regressor.predict(x_test)
-    return y_pred[0]
+    return float(y_pred[0])


 def interquartile_range_checker(train_user: list) -> float:
@@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float:
     q3 = np.percentile(train_user, 75)
     iqr = q3 - q1
     low_lim = q1 - (iqr * 0.1)
-    return low_lim
+    return float(low_lim)


 def data_safety_checker(list_vote: list, actual_result: float) -> bool:
diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py
index a43757c5c20e..fbc1b8bd227e 100644
--- a/machine_learning/k_nearest_neighbours.py
+++ b/machine_learning/k_nearest_neighbours.py
@@ -42,7 +42,7 @@ def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
         >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
         10.0
         """
-        return np.linalg.norm(a - b)
+        return float(np.linalg.norm(a - b))

     def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
         """
diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py
index 090af5382185..496026631fbe 100644
--- a/machine_learning/logistic_regression.py
+++ b/machine_learning/logistic_regression.py
@@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
     @returns: returns value in the range 0 to 1

     Examples:
-    >>> sigmoid_function(4)
+    >>> float(sigmoid_function(4))
     0.9820137900379085
     >>> sigmoid_function(np.array([-3, 3]))
     array([0.04742587, 0.95257413])
@@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
     References:
         - https://en.wikipedia.org/wiki/Logistic_regression
     """
-    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
+    return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())


 def log_likelihood(x, y, weights):
diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py
index 150035661eb7..929b4c3f391d 100644
--- a/machine_learning/loss_functions.py
+++ b/machine_learning/loss_functions.py
@@ -36,7 +36,7 @@ def binary_cross_entropy(

     y_pred = np.clip(y_pred, epsilon, 1 - epsilon)  # Clip predictions to avoid log(0)
     bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
-    return np.mean(bce_loss)
+    return float(np.mean(bce_loss))


 def binary_focal_cross_entropy(
@@ -87,7 +87,7 @@ def binary_focal_cross_entropy(
         + (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
     )

-    return np.mean(bcfe_loss)
+    return float(np.mean(bcfe_loss))


 def categorical_cross_entropy(
@@ -145,7 +145,7 @@ def categorical_cross_entropy(
         raise ValueError("Predicted probabilities must sum to approximately 1.")

     y_pred = np.clip(y_pred, epsilon, 1)  # Clip predictions to avoid log(0)
-    return -np.sum(y_true * np.log(y_pred))
+    return float(-np.sum(y_true * np.log(y_pred)))


 def categorical_focal_cross_entropy(
@@ -247,7 +247,7 @@ def categorical_focal_cross_entropy(
         alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1
     )

-    return np.mean(cfce_loss)
+    return float(np.mean(cfce_loss))


 def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -287,7 +287,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
         raise ValueError("y_true can have values -1 or 1 only.")

     hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
-    return np.mean(hinge_losses)
+    return float(np.mean(hinge_losses))


 def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:

     >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102).item()
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
-    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164).item()
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028).item()
     True
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16).item()
     True
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16).item()
     False
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -433,7 +433,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
         raise ValueError("Input arrays must have the same length.")

     squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
-    return np.mean(squared_logarithmic_errors)
+    return float(np.mean(squared_logarithmic_errors))


 def mean_absolute_percentage_error(
@@ -478,7 +478,7 @@ def mean_absolute_percentage_error(
     y_true = np.where(y_true == 0, epsilon, y_true)
     absolute_percentage_diff = np.abs((y_true - y_pred) / y_true)

-    return np.mean(absolute_percentage_diff)
+    return float(np.mean(absolute_percentage_diff))


 def perplexity_loss(
@@ -570,7 +570,7 @@ def perplexity_loss(
     # Calculating perplexity for each sentence
     perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1)))

-    return np.mean(perp_losses)
+    return float(np.mean(perp_losses))


 def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
@@ -626,7 +626,7 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->

     diff = np.abs(y_true - y_pred)
     loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
-    return np.mean(loss)
+    return float(np.mean(loss))


 def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -660,7 +660,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
         raise ValueError("Input arrays must have the same length.")

     kl_loss = y_true * np.log(y_true / y_pred)
-    return np.sum(kl_loss)
+    return float(np.sum(kl_loss))


 if __name__ == "__main__":
diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py
index a1e99ce4ad40..21d6f921340b 100644
--- a/machine_learning/mfcc.py
+++ b/machine_learning/mfcc.py
@@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray:
     Examples:
     >>> audio = np.array([1, 2, 3, 4, 5])
     >>> normalized_audio = normalize(audio)
-    >>> np.max(normalized_audio)
+    >>> float(np.max(normalized_audio))
     1.0
-    >>> np.min(normalized_audio)
+    >>> float(np.min(normalized_audio))
     0.2
     """
     # Divide the entire audio signal by the maximum absolute value
@@ -285,7 +285,7 @@ def freq_to_mel(freq: float) -> float:
     999.99
     """
     # Use the formula to convert frequency to the mel scale
-    return 2595.0 * np.log10(1.0 + freq / 700.0)
+    return float(2595.0 * np.log10(1.0 + freq / 700.0))


 def mel_to_freq(mels: float) -> float:
@@ -321,7 +321,7 @@ def mel_spaced_filterbank(
         Mel-spaced filter bank.

     Examples:
-    >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)
+    >>> round(float(mel_spaced_filterbank(8000, 10, 1024)[0][1]), 10)
     0.0004603981
     """
     freq_min = 0
@@ -438,7 +438,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra
         The DCT basis matrix.

     Examples:
-    >>> round(discrete_cosine_transform(3, 5)[0][0], 5)
+    >>> round(float(discrete_cosine_transform(3, 5)[0][0]), 5)
     0.44721
     """
     basis = np.empty((dct_filter_num, filter_num))
diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py
index e99a4131e972..1bf59f44edcc 100644
--- a/machine_learning/multilayer_perceptron_classifier.py
+++ b/machine_learning/multilayer_perceptron_classifier.py
@@ -20,7 +20,7 @@ def wrapper(y):
     >>> wrapper(Y)
     [0, 0, 1]
     """
-    return list(y)
+    return (y).tolist()


 if __name__ == "__main__":
diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py
index 08b969a95c3b..ec832d6a08b7 100644
--- a/machine_learning/scoring_functions.py
+++ b/machine_learning/scoring_functions.py
@@ -20,11 +20,11 @@ def mae(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(mae(predict,actual),decimals = 2)
+    >>> float(np.around(mae(predict,actual),decimals = 2))
     0.67

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> mae(predict,actual)
+    >>> float(mae(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -41,11 +41,11 @@ def mse(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(mse(predict,actual),decimals = 2)
+    >>> float(np.around(mse(predict,actual),decimals = 2))
     1.33

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> mse(predict,actual)
+    >>> float(mse(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -63,11 +63,11 @@ def rmse(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(rmse(predict,actual),decimals = 2)
+    >>> float(np.around(rmse(predict,actual),decimals = 2))
     1.15

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> rmse(predict,actual)
+    >>> float(rmse(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -85,11 +85,11 @@ def rmsle(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [10,10,30];predict = [10,2,30]
-    >>> np.around(rmsle(predict,actual),decimals = 2)
+    >>> float(np.around(rmsle(predict,actual),decimals = 2))
     0.75

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> rmsle(predict,actual)
+    >>> float(rmsle(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -117,12 +117,12 @@ def mbd(predict, actual):

     Here the model overpredicts
     >>> actual = [1,2,3];predict = [2,3,4]
-    >>> np.around(mbd(predict,actual),decimals = 2)
+    >>> float(np.around(mbd(predict,actual),decimals = 2))
     50.0

     Here the model underpredicts
     >>> actual = [1,2,3];predict = [0,1,1]
-    >>> np.around(mbd(predict,actual),decimals = 2)
+    >>> float(np.around(mbd(predict,actual),decimals = 2))
     -66.67
     """
     predict = np.array(predict)
diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py
index 0bc3b17d7e5a..c8a573796882 100644
--- a/machine_learning/similarity_search.py
+++ b/machine_learning/similarity_search.py
@@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
     >>> cosine_similarity(np.array([1, 2]), np.array([6, 32]))
     0.9615239476408232
     """
-    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
+    return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)))


 if __name__ == "__main__":
diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py
index 24046115ebc4..5213dd3891d8 100644
--- a/machine_learning/support_vector_machines.py
+++ b/machine_learning/support_vector_machines.py
@@ -3,7 +3,7 @@
 from scipy.optimize import Bounds, LinearConstraint, minimize


-def norm_squared(vector: ndarray) -> float:
+def norm_squared(vector: ndarray) -> float | int:
     """
     Return the squared second norm of vector
     norm_squared(v) = sum(x * x for x in v)
@@ -21,7 +21,7 @@ def norm_squared(vector: ndarray) -> float:
     >>> norm_squared([0, 0])
     0
     """
-    return np.dot(vector, vector)
+    return (np.dot(vector, vector)).item()


 class SVC:
diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py
index 9b29b37b0ce6..6e61aebe0610 100644
--- a/maths/euclidean_distance.py
+++ b/maths/euclidean_distance.py
@@ -22,7 +22,7 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
     >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])
     8.0
     """
-    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
+    return float(np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)))


 def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
diff --git a/maths/euler_method.py b/maths/euler_method.py
index 30f193e6daa5..c6adb07e2d3d 100644
--- a/maths/euler_method.py
+++ b/maths/euler_method.py
@@ -26,7 +26,7 @@ def explicit_euler(
     ...     return y
     >>> y0 = 1
     >>> y = explicit_euler(f, y0, 0.0, 0.01, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     144.77277243257308
     """
     n = int(np.ceil((x_end - x0) / step_size))
diff --git a/maths/euler_modified.py b/maths/euler_modified.py
index d02123e1e2fb..bb282e9f0ab9 100644
--- a/maths/euler_modified.py
+++ b/maths/euler_modified.py
@@ -24,13 +24,13 @@ def euler_modified(
     >>> def f1(x, y):
     ...     return -2*x*(y**2)
     >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0)
-    >>> y[-1]
+    >>> float(y[-1])
     0.503338255442106
     >>> import math
     >>> def f2(x, y):
     ...     return -2*y + (x**3)*math.exp(-2*x)
     >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3)
-    >>> y[-1]
+    >>> float(y[-1])
     0.5525976431951775
     """
     n = int(np.ceil((x_end - x0) / step_size))
diff --git a/maths/gaussian.py b/maths/gaussian.py
index 0e02010a9c67..49adc50f755c 100644
--- a/maths/gaussian.py
+++ b/maths/gaussian.py
@@ -7,16 +7,16 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
     """
-    >>> gaussian(1)
+    >>> float(gaussian(1))
     0.24197072451914337

-    >>> gaussian(24)
+    >>> float(gaussian(24))
     3.342714441794458e-126

-    >>> gaussian(1, 4, 2)
+    >>> float(gaussian(1, 4, 2))
     0.06475879783294587

-    >>> gaussian(1, 5, 3)
+    >>> float(gaussian(1, 5, 3))
     0.05467002489199788

     Supports NumPy Arrays
@@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
            5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27,
            2.14638374e-32, 7.99882776e-38, 1.09660656e-43])

-    >>> gaussian(15)
+    >>> float(gaussian(15))
     5.530709549844416e-50

     >>> gaussian([1,2, 'string'])
@@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
     ...
     OverflowError: (34, 'Result too large')

-    >>> gaussian(10**-326)
+    >>> float(gaussian(10**-326))
     0.3989422804014327

-    >>> gaussian(2523, mu=234234, sigma=3425)
+    >>> float(gaussian(2523, mu=234234, sigma=3425))
     0.0
     """
     return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py
index 3237124e8d36..99f02e31e417 100644
--- a/maths/minkowski_distance.py
+++ b/maths/minkowski_distance.py
@@ -19,7 +19,7 @@ def minkowski_distance(
     >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2)
     8.0
     >>> import numpy as np
-    >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))
+    >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)))
     True
     >>> minkowski_distance([1.0], [2.0], -1)
     Traceback (most recent call last):
diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py
index fb406171098a..26244a58552f 100644
--- a/maths/numerical_analysis/adams_bashforth.py
+++ b/maths/numerical_analysis/adams_bashforth.py
@@ -102,7 +102,7 @@ def step_3(self) -> np.ndarray:
         >>> def f(x, y):
         ...     return x + y
         >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3()
-        >>> y[3]
+        >>> float(y[3])
         0.15533333333333332

         >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3()
@@ -140,9 +140,9 @@ def step_4(self) -> np.ndarray:
         ...     return x + y
         >>> y = AdamsBashforth(
         ...     f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4()
-        >>> y[4]
+        >>> float(y[4])
         0.30699999999999994
-        >>> y[5]
+        >>> float(y[5])
         0.5771083333333333

         >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4()
@@ -185,7 +185,7 @@ def step_5(self) -> np.ndarray:
         >>> y = AdamsBashforth(
         ...     f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536],
         ...     0.2, 1).step_5()
-        >>> y[-1]
+        >>> float(y[-1])
         0.05436839444444452

         >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5()
diff --git a/maths/numerical_analysis/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py
index 4cac017ee89e..3a25b0fb0173 100644
--- a/maths/numerical_analysis/runge_kutta.py
+++ b/maths/numerical_analysis/runge_kutta.py
@@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end):
     ...     return y
     >>> y0 = 1
     >>> y = runge_kutta(f, y0, 0.0, 0.01, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     148.41315904125113
     """
     n = int(np.ceil((x_end - x0) / h))
diff --git a/maths/numerical_analysis/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py
index 8181fe3015fc..0fbd60a35c1a 100644
--- a/maths/numerical_analysis/runge_kutta_fehlberg_45.py
+++ b/maths/numerical_analysis/runge_kutta_fehlberg_45.py
@@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45(
     >>> def f(x, y):
     ...     return 1 + y**2
     >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1)
-    >>> y[1]
+    >>> float(y[1])
     0.2027100937470787
     >>> def f(x,y):
     ...     return x
     >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0)
-    >>> y[1]
+    >>> float(y[1])
     -0.18000000000000002
     >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1)
     Traceback (most recent call last):
diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py
index 451cde4cb935..5d9672679813 100644
--- a/maths/numerical_analysis/runge_kutta_gills.py
+++ b/maths/numerical_analysis/runge_kutta_gills.py
@@ -34,7 +34,7 @@ def runge_kutta_gills(
     >>> def f(x, y):
     ...     return (x-y)/2
     >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     3.4104259225717537

     >>> def f(x,y):
diff --git a/maths/softmax.py b/maths/softmax.py
index 04cf77525420..95c95e66f59e 100644
--- a/maths/softmax.py
+++ b/maths/softmax.py
@@ -28,7 +28,7 @@ def softmax(vector):

     The softmax vector adds up to one. We need to ceil to mitigate for
     precision
-    >>> np.ceil(np.sum(softmax([1,2,3,4])))
+    >>> float(np.ceil(np.sum(softmax([1,2,3,4]))))
     1.0

     >>> vec = np.array([5,5])
diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py
index d488de590cc2..1b7c0beed3ba 100644
--- a/neural_network/two_hidden_layers_neural_network.py
+++ b/neural_network/two_hidden_layers_neural_network.py
@@ -64,7 +64,7 @@ def feedforward(self) -> np.ndarray:
         >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
         >>> res = nn.feedforward()
         >>> array_sum = np.sum(res)
-        >>> np.isnan(array_sum)
+        >>> bool(np.isnan(array_sum))
         False
         """
         # Layer_between_input_and_first_hidden_layer is the layer connecting the
@@ -105,7 +105,7 @@ def back_propagation(self) -> None:
         >>> res = nn.feedforward()
         >>> nn.back_propagation()
         >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
-        >>> (res == updated_weights).all()
+        >>> bool((res == updated_weights).all())
         False
         """

@@ -171,7 +171,7 @@ def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None:
         >>> first_iteration_weights = nn.feedforward()
         >>> nn.back_propagation()
         >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
-        >>> (first_iteration_weights == updated_weights).all()
+        >>> bool((first_iteration_weights == updated_weights).all())
         False
         """
         for iteration in range(1, iterations + 1):
diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py
index 858eb0b2c524..d18229ecaac3 100644
--- a/other/bankers_algorithm.py
+++ b/other/bankers_algorithm.py
@@ -93,7 +93,7 @@ def __need_index_manager(self) -> dict[int, list[int]]:
         {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0,
         0, 3]}
         """
-        return {self.__need().index(i): i for i in self.__need()}
+        return {self.__need().index(i): np.array(i).tolist() for i in self.__need()}

     def main(self, **kwargs) -> None:
         """
diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py
index e3c2f9d07aed..118d17a620bc 100644
--- a/physics/in_static_equilibrium.py
+++ b/physics/in_static_equilibrium.py
@@ -47,7 +47,7 @@ def in_static_equilibrium(
     ....]
     >>> force = array([[1, 1], [-1, 2]])
     >>> location = array([[1, 0], [10, 0]])
-    >>> in_static_equilibrium(force, location)
+    >>> bool(in_static_equilibrium(force, location))
     False
     """
     # summation of moments is zero
diff --git a/requirements.txt b/requirements.txt
index bb3d671393b9..342c1233ba19 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,10 +3,10 @@ fake_useragent
 imageio
 keras ; python_version < '3.12'
 lxml
-matplotlib
-numpy
-opencv-python
-pandas
+matplotlib>=3.8.4
+numpy>=2.0.0
+opencv-python>=4.10.0.84
+pandas>=2.2.2
 pillow
 # projectq  # uncomment once quantum/quantum_random.py is fixed
 qiskit ; python_version < '3.12'
@@ -14,8 +14,8 @@ qiskit-aer ; python_version < '3.12'
 requests
 rich
 # scikit-fuzzy  # uncomment once fuzzy_logic/fuzzy_operations.py is fixed
-scikit-learn
-statsmodels
+scikit-learn>=1.4.2
+statsmodels>=0.14.2
 sympy
 tensorflow
 tweepy