Skip to content

Commit 76a11d1

Browse files
Update scaled_exponential_linear_unit.py
1 parent b5a4d22 commit 76a11d1

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

neural_network/activation_functions/scaled_exponential_linear_unit.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,26 +16,26 @@
1616

1717

1818
def scaled_exponential_linear_unit(
    vector: np.ndarray, alpha: float = 1.6732, lambda_: float = 1.0507
) -> np.ndarray:
    """
    Applies the Scaled Exponential Linear Unit function to each element of the vector.

    SELU is computed elementwise via np.where, so any array shape broadcasts
    naturally; positive entries are scaled linearly, non-positive entries pass
    through a scaled exponential.

    Parameters :
        vector : np.ndarray
        alpha : float (default = 1.6732)
        lambda_ : float (default = 1.0507)

    Returns : np.ndarray
    Formula : f(x) = lambda_ * x                      if x > 0
                     lambda_ * alpha * (e**x - 1)     if x <= 0

    Examples :
    >>> scaled_exponential_linear_unit(vector=np.array([1.3, 3.7, 2.4]))
    array([1.36591, 3.88759, 2.52168])

    >>> scaled_exponential_linear_unit(vector=np.array([1.3, 4.7, 8.2]))
    array([1.36591, 4.93829, 8.61574])
    """
    # lambda_ factors out of both branches, so apply it once to the selection.
    return lambda_ * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
3939

4040

4141
if __name__ == "__main__":

0 commit comments

Comments
 (0)