Remove DenoisingAutoEncoder

This commit is contained in:
fchollet 2015-07-05 11:09:22 -07:00
parent b22e547e98
commit 8995b50a96
3 changed files with 8 additions and 73 deletions

@@ -161,44 +161,6 @@ decoder = containers.Sequential([Dense(8, 16), Dense(16, 32)])
autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False, tie_weights=True))
```
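For reference, the example this hunk truncates can be reconstructed as below — a sketch assuming the 0.x-era `containers` API, with the encoder shapes (32 → 16 → 8) assumed to mirror the decoder shown in the hunk header:
```python
# Sketch only: the encoder shapes are an assumption mirroring the decoder above.
from keras.models import Sequential
from keras.layers import containers
from keras.layers.core import Dense, AutoEncoder

encoder = containers.Sequential([Dense(32, 16), Dense(16, 8)])
decoder = containers.Sequential([Dense(8, 16), Dense(16, 32)])

autoencoder = Sequential()
autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder,
                            output_reconstruction=False, tie_weights=True))
```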
---
## DenoisingAutoEncoder
```python
keras.layers.core.DenoisingAutoEncoder(encoder, decoder, output_reconstruction=True, tie_weights=False, weights=None, corruption_level=0.3)
```
A denoising autoencoder model that inherits the base features from AutoEncoder.
Since this layer uses logic similar to Dropout, it cannot be the first layer in a pipeline.
- __Input shape__: The layer shape is defined by the encoder definitions
- __Output shape__: The layer shape is defined by the decoder definitions
- __Arguments__:
    - __encoder__: A [layer](./) or [layer container](./containers.md).
    - __decoder__: A [layer](./) or [layer container](./containers.md).
    - __output_reconstruction__: If False, then when `.predict()` is called the output is the deepest hidden layer's activation. Otherwise the output of the final decoder layer is returned. Be sure your validation data conforms to this logic if you decide to use any.
    - __tie_weights__: If True, the encoder bias is tied to the decoder bias. **Note**: this requires the encoder layer corresponding to this decoder layer to be of the same type, e.g. Dense:Dense.
    - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`.
    - __corruption_level__: the fraction of input units randomly zeroed out by binomial noise before encoding.
- __Example__:
```python
# input shape: (nb_samples, 32)
autoencoder.add(Dense(32, 32))
autoencoder.add(DenoisingAutoEncoder(encoder=Dense(32, 16),
                                     decoder=Dense(16, 32),
                                     output_reconstruction=False, tie_weights=True,
                                     corruption_level=0.3))
```
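With the class removed by this commit, the Dropout similarity noted above suggests an approximate replacement — a minimal sketch, assuming the 0.x-era `Sequential` API, that puts an explicit `Dropout` in front of a plain `AutoEncoder` (note that `Dropout` also rescales activations at test time, so this is not an exact substitute):
```python
# Approximate replacement for DenoisingAutoEncoder(corruption_level=0.3):
# Dropout(0.3) applies the same binomial masking to the encoder's input.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, AutoEncoder

model = Sequential()
model.add(Dense(32, 32))
model.add(Dropout(0.3))  # zero each input unit with probability 0.3 at train time
model.add(AutoEncoder(encoder=Dense(32, 16), decoder=Dense(16, 32),
                      output_reconstruction=False, tie_weights=True))
```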
---

@@ -494,34 +494,6 @@ class AutoEncoder(Layer):
            "tie_weights":self.tie_weights}


class DenoisingAutoEncoder(AutoEncoder):
    '''
        A denoising autoencoder model that inherits the base features from autoencoder
    '''
    def __init__(self, encoder=None, decoder=None, output_reconstruction=True, tie_weights=False, weights=None, corruption_level=0.3):
        super(DenoisingAutoEncoder, self).__init__(encoder, decoder, output_reconstruction, tie_weights, weights)
        self.corruption_level = corruption_level

    def _corrupt_input(self, X):
        """
        http://deeplearning.net/tutorial/dA.html
        """
        return X * srng.binomial(size=X.shape, n=1,
                                 p=1-self.corruption_level,
                                 dtype=theano.config.floatX)

    def get_input(self, train=False):
        uncorrupted_input = super(DenoisingAutoEncoder, self).get_input(train)
        return self._corrupt_input(uncorrupted_input)

    def get_config(self):
        return {"name":self.__class__.__name__,
            "encoder_config":self.encoder.get_config(),
            "decoder_config":self.decoder.get_config(),
            "corruption_level":self.corruption_level,
            "output_reconstruction":self.output_reconstruction,
            "tie_weights":self.tie_weights}

class MaxoutDense(Layer):
'''

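The removed `_corrupt_input` keeps each input entry with probability `1 - corruption_level` and zeroes it otherwise. A minimal NumPy sketch of the same masking, outside the Theano graph:
```python
import numpy as np

rng = np.random.RandomState(0)
corruption_level = 0.3

X = np.ones((4, 8), dtype="float32")
# Bernoulli mask: 1 with probability 1 - corruption_level, else 0,
# mirroring srng.binomial(size=X.shape, n=1, p=1 - corruption_level) above.
mask = rng.binomial(n=1, p=1 - corruption_level, size=X.shape)
X_corrupted = X * mask.astype("float32")
print(X_corrupted)  # roughly 30% of entries are zeroed
```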
@@ -71,7 +71,7 @@ class SimpleRNN(Recurrent):
        '''
        return self.activation(x_t + mask_tm1 * T.dot(h_tm1, u))

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)  # shape: (nb_samples, time (padded with zeros), input_dim)
        # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
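This and each of the remaining hunks make the same one-line change: `get_output` gains a `train=False` default, so the test-phase graph can be requested without arguments. A hypothetical usage sketch, assuming the 0.x-era symbolic API:
```python
import theano.tensor as T
from keras.layers.recurrent import SimpleRNN

rnn = SimpleRNN(input_dim=16, output_dim=32)
rnn.input = T.tensor3()  # symbolic input; recurrent layers may already set this in __init__

train_out = rnn.get_output(train=True)  # training-phase graph
test_out = rnn.get_output()             # was a TypeError before; now defaults to train=False
```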
@@ -149,7 +149,7 @@ class SimpleDeepRNN(Recurrent):
            o += mask_tmi*self.inner_activation(T.dot(h_tmi, U_tmi))
        return self.activation(o)

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth)
        X = X.dimshuffle((1, 0, 2))
@@ -263,7 +263,7 @@ class GRU(Recurrent):
        h_t = z * h_mask_tm1 + (1 - z) * hh_t
        return h_t

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
@@ -379,7 +379,7 @@ class LSTM(Recurrent):
        h_t = o_t * self.activation(c_t)
        return h_t, c_t

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
@@ -493,7 +493,7 @@ class JZS1(Recurrent):
        h_t = hh_t * z + h_mask_tm1 * (1 - z)
        return h_t

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
@@ -601,7 +601,7 @@ class JZS2(Recurrent):
        h_t = hh_t * z + h_mask_tm1 * (1 - z)
        return h_t

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
@@ -702,7 +702,7 @@ class JZS3(Recurrent):
        h_t = hh_t * z + h_mask_tm1 * (1 - z)
        return h_t

    def get_output(self, train):
    def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))
@@ -734,3 +734,4 @@