Merge pull request #381 from osh/thresh_activ

adding thresholded linear and rectified activation functions
François Chollet 2015-08-24 23:17:07 -07:00
commit 14e4a2391a
2 changed files with 77 additions and 1 deletion

@@ -52,4 +52,40 @@ Parametric Softplus of the form: (`f(x) = alpha * log(1 + exp(beta * x))`). This is
    - __input_shape__: tuple.
- __References__:
    - [Inferring Nonlinear Neuronal Computation Based on Physiologically Plausible Inputs](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003143)
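
To make the formula above concrete, here is a minimal NumPy sketch of the Parametric Softplus (an illustration, not part of this change); the `alpha` and `beta` values below are arbitrary examples standing in for the layer's learned per-unit parameters.

```python
import numpy as np

def parametric_softplus(x, alpha=0.2, beta=5.0):
    # f(x) = alpha * log(1 + exp(beta * x)), applied element-wise
    return alpha * np.log1p(np.exp(beta * x))

parametric_softplus(np.array([-1.0, 0.0, 1.0]))
# -> approximately [0.0013, 0.1386, 1.0013]
```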
## Thresholded Linear

```python
keras.layers.advanced_activations.ThresholdedLinear(theta)
```

Parametrized linear unit: values whose absolute value is below the threshold `theta` are zeroed, and all other values pass through unchanged (see the sketch below).

- __Input shape__: Same shape as the input. This layer cannot be used as the first layer in a model.
- __Output shape__: Same as input.
- __Arguments__:
    - __theta__: float >= 0. Threshold location of the activation.
- __References__:
    - [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
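
A minimal NumPy sketch of this behaviour (an illustration only, not part of the layer's API):

```python
import numpy as np

def thresholded_linear(x, theta=1.0):
    # identity activation, except values within theta of zero are zeroed
    return np.where(np.abs(x) < theta, 0.0, x)

thresholded_linear(np.array([-2.0, -0.5, 0.5, 2.0]), theta=1.0)
# -> array([-2.,  0.,  0.,  2.])
```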
## Thresholded ReLU

```python
keras.layers.advanced_activations.ThresholdedReLu(theta)
```

Parametrized rectified linear unit: values greater than the threshold `theta` pass through unchanged, and all other values are zeroed (see the sketch below).

- __Input shape__: Same shape as the input. This layer cannot be used as the first layer in a model.
- __Output shape__: Same as input.
- __Arguments__:
    - __theta__: float >= 0. Threshold location of the activation.
- __References__:
    - [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
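
The corresponding NumPy sketch (again, just an illustration): unlike the thresholded linear unit, negative values are always zeroed.

```python
import numpy as np

def thresholded_relu(x, theta=1.0):
    # values strictly greater than theta pass through; everything else is zeroed
    return np.where(x > theta, x, 0.0)

thresholded_relu(np.array([-2.0, 0.5, 1.0, 2.0]), theta=1.0)
# -> array([0., 0., 0., 2.])
```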

@@ -77,3 +77,43 @@ class ParametricSoftplus(MaskedLayer):
                "input_shape": self.input_shape,
                "alpha_init": self.alpha_init,
                "beta_init": self.beta_init}


class ThresholdedLinear(MaskedLayer):
    '''Thresholded Linear Activation.

    Zeroes values whose absolute value is below `theta`; all other
    values pass through unchanged.

    Reference:
        Zero-Bias Autoencoders and the Benefits of Co-Adapting Features
        http://arxiv.org/pdf/1402.3337.pdf
    '''
    def __init__(self, theta=1.0):
        super(ThresholdedLinear, self).__init__()
        self.theta = theta

    def get_output(self, train):
        X = self.get_input(train)
        # zero out values within theta of zero, keep the rest unchanged
        return T.switch(abs(X) < self.theta, 0, X)

    def get_config(self):
        return {"name": self.__class__.__name__,
                "theta": self.theta}


class ThresholdedReLu(MaskedLayer):
    '''Thresholded Rectified Activation.

    Zeroes values that are less than or equal to `theta`; larger values
    pass through unchanged.

    Reference:
        Zero-Bias Autoencoders and the Benefits of Co-Adapting Features
        http://arxiv.org/pdf/1402.3337.pdf
    '''
    def __init__(self, theta=1.0):
        super(ThresholdedReLu, self).__init__()
        self.theta = theta

    def get_output(self, train):
        X = self.get_input(train)
        # keep values above theta, zero everything else
        return T.switch(X > self.theta, X, 0)

    def get_config(self):
        return {"name": self.__class__.__name__,
                "theta": self.theta}