Package rdkit :: Package ML :: Package Neural :: Module ActFuncs
[hide private]
[frames] | [no frames]

Source Code for Module rdkit.ML.Neural.ActFuncs

 1  # 
 2  #  Copyright (C) 2000-2008  greg Landrum 
 3  # 
 4  """ Activation functions for neural network nodes 
 5   
 6  Activation functions should implement the following API: 
 7   
 8   - _Eval(input)_: returns the value of the function at a given point 
 9   
10   - _Deriv(input)_: returns the derivative of the function at a given point 
11   
12  The current Backprop implementation also requires: 
13   
14   - _DerivFromVal(val)_: returns the derivative of the function when its 
15                          value is val 
16   
17  In all cases _input_ is a float as is the value returned. 
18   
19  """ 
20  import math 
21     
class ActFunc(object):
  """ "virtual base class" for activation functions

  Concrete activation functions are expected to provide an Eval() method;
  calling an instance like a function simply delegates to it, so that
  act(x) and act.Eval(x) are interchangeable.
  """

  def __call__(self, input):
    # make instances directly callable
    return self.Eval(input)
class Sigmoid(ActFunc):
  """ the standard sigmoidal function: f(x) = 1/(1 + exp(-beta*x))

  Values lie in the open interval (0, 1); _beta_ sets the steepness.
  """

  def Eval(self, input):
    """ returns the value of the sigmoid at _input_ (a float)

    Evaluated in a numerically stable form: the naive
    1/(1 + exp(-beta*x)) overflows for large negative beta*x
    (math.exp raises OverflowError above ~709); here large
    arguments saturate smoothly toward 0. or 1. instead.
    """
    z = self.beta * input
    if z >= 0:
      return 1. / (1. + math.exp(-z))
    # algebraically equivalent form exp(z)/(1+exp(z)) avoids exp(-z)
    # overflowing when z is very negative
    ez = math.exp(z)
    return ez / (1. + ez)

  def Deriv(self, input):
    """ derivative at _input_: beta * f(x) * (1 - f(x)) """
    val = self.Eval(input)
    return self.beta * val * (1. - val)

  def DerivFromVal(self, val):
    """ derivative of the function when its value is _val_ """
    return self.beta * val * (1. - val)

  def __init__(self, beta=1.):
    # beta: steepness (gain) of the sigmoid; default 1.
    self.beta = beta
class TanH(ActFunc):
  """ the standard hyperbolic tangent function: f(x) = tanh(beta*x)

  Values lie in the open interval (-1, 1); _beta_ sets the steepness.
  """

  def Eval(self, input):
    """ returns the value of tanh(beta*input)

    Uses math.tanh, which is numerically stable: the naive
    (exp(z)-exp(-z))/(exp(z)+exp(-z)) form raises OverflowError
    once |beta*input| exceeds ~710, whereas tanh saturates to +/-1.
    """
    return math.tanh(self.beta * input)

  def Deriv(self, input):
    """ derivative at _input_: beta * (1 - f(x)^2) """
    val = self.Eval(input)
    return self.beta * (1 - val * val)

  def DerivFromVal(self, val):
    """ derivative of the function when its value is _val_ """
    return self.beta * (1 - val * val)

  def __init__(self, beta=1.):
    # beta: steepness (gain) of the tanh; default 1.
    self.beta = beta