ActivationLayer.py
# -*- coding: utf-8 -*-
"""
Implementation of the Activation Layer (non-linearity).
"""
import numpy as np


class Activation:
    def __init__(self, activation='relu'):
        self.activation = activation

    def Relu(self, x, deriv=False):
        # ReLU: max(0, x); its derivative is 1 where x > 0, else 0.
        if not deriv:
            return np.maximum(0, x)
        return np.where(x > 0, 1, 0)

    def Leaky_Relu(self, x, deriv=False):
        # Leaky ReLU: max(0.1*x, x); its derivative is 1 where x > 0, else 0.1.
        if not deriv:
            return np.maximum(x * 0.1, x)
        return np.where(x > 0, 1, 0.1)

    def forward(self, input):
        # Apply the selected activation and cache the output for backprop.
        if self.activation == 'relu':
            self.Z = self.Relu(input)
        elif self.activation == 'leaky_relu':
            self.Z = self.Leaky_Relu(input)
        return self.Z

    def backprop(self, dLdOut):
        # Chain rule: dL/dZ = dOut/dZ * dL/dOut. Evaluating the derivative on
        # the cached output self.Z is valid here because both activations
        # preserve the sign of their input.
        if self.activation == 'relu':
            dOutdZ = self.Relu(self.Z, deriv=True)
        elif self.activation == 'leaky_relu':
            dOutdZ = self.Leaky_Relu(self.Z, deriv=True)
        dLdZ = np.multiply(dOutdZ, dLdOut)
        return dLdZ