activation_functions.py
import numpy as np

def Identity(X):
    """
    Identity is the simplest activation function:
    it returns the input unchanged.

    Parameters:
    - X: scalar or array, -Inf to +Inf

    Returns:
    X, -Inf to +Inf
    """
    return X

def BinaryStep(X):
    """
    BinaryStep is the basic threshold activation function:
    it returns 0 for negative inputs and 1 for inputs >= 0.

    Parameters:
    - X: scalar or array, -Inf to +Inf

    Returns:
    0 or 1 (elementwise for array inputs)
    """
    return np.where(X < 0, 0, 1)

def Linear(X, constant=1):
    """Linear activation: scales the input by a constant factor."""
    return constant * X

def dLinear(constant=1):
    """Derivative of the linear activation; independent of X."""
    return constant

def Sigmoid(X):
    """Sigmoid activation: squashes the input into the range (0, 1)."""
    return 1 / (1 + np.exp(-X))

def dSigmoid(X):
    """Derivative of the sigmoid: Sigmoid(X) * (1 - Sigmoid(X))."""
    s = Sigmoid(X)
    return s * (1 - s)

def Tanh(X):
    """Hyperbolic tangent activation: squashes the input into (-1, 1)."""
    return np.tanh(X)

def dTanh(X):
    """Derivative of tanh: 1 - tanh(X)^2."""
    return 1 - np.power(np.tanh(X), 2)

def ReLu(X):
    """Rectified Linear Unit: max(0, X), elementwise."""
    return np.maximum(0, X)

def dReLu(X):
    """Derivative of ReLU: 1 for positive inputs, 0 otherwise (elementwise)."""
    return np.where(X > 0, 1, 0)

def Leaky_ReLu(X, factor=0.01):
    """Leaky ReLU: like ReLU, but scales negative inputs by a small factor."""
    return np.maximum(factor * X, X)

def dLeaky_ReLu(X, factor=0.01):
    """Derivative of Leaky ReLU: 1 for positive inputs, factor otherwise (elementwise)."""
    return np.where(X > 0, 1, factor)

# TODO: d/dX

def Softmax(X):
    """Softmax: exponentiates a 1-D input and normalises it into a probability distribution."""
    exp = np.exp(X)
    return exp / np.sum(exp)

def StableSoftmax(X):
    """
    Numerically stable softmax: shifting the input by its maximum keeps
    np.exp from overflowing for large values, and leaves the result
    unchanged because softmax is invariant to adding a constant to every input.
    """
    shift = X - np.max(X)
    exp = np.exp(shift)
    return exp / np.sum(exp)
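
# Illustrative note (not from the original file): float64 np.exp overflows to
# inf for inputs above roughly 709, so Softmax(np.array([1000., 1001.]))
# returns nan (inf / inf), while StableSoftmax(np.array([1000., 1001.]))
# gives approximately [0.2689, 0.7311].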

def dSoftmax(X):
    """
    Derivative (Jacobian matrix) of the softmax for a 1-D input:
    J = diag(s) - s s^T, where s = Softmax(X).
    """
    s = Softmax(X).reshape(-1, 1)
    return np.diagflat(s) - np.dot(s, s.T)
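
# Worked example (illustrative, not from the original file): for X = [1., 2.],
# Softmax(X) ~= [0.2689, 0.7311] and
# dSoftmax(X) ~= [[ 0.1966, -0.1966],
#                 [-0.1966,  0.1966]].
# Each row sums to zero because the softmax outputs always sum to 1.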

def GeLu(X):
    """Gaussian Error Linear Unit, using the common tanh approximation."""
    return 0.5 * X * (1 + np.tanh(np.sqrt(2 / np.pi) *
                                  (X + 0.044715 * np.power(X, 3))))
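
# Minimal usage sketch (not part of the original module): the sample values
# below are arbitrary and only demonstrate calling a few of the activations
# above on a small NumPy array.
if __name__ == "__main__":
    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    print("ReLu:         ", ReLu(x))
    print("Leaky_ReLu:   ", Leaky_ReLu(x))
    print("Sigmoid:      ", Sigmoid(x))
    print("Tanh:         ", Tanh(x))
    print("GeLu:         ", GeLu(x))
    probs = StableSoftmax(x)
    print("StableSoftmax:", probs, "sums to", np.sum(probs))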