Add GaussianDropout layer.
This commit is contained in:
parent
e1aa2c513a
commit
66e586ec70
@ -8,3 +8,4 @@ from keras_core.layers.regularization.activity_regularization import (
|
||||
ActivityRegularization,
|
||||
)
|
||||
from keras_core.layers.regularization.dropout import Dropout
|
||||
from keras_core.layers.regularization.gaussian_dropout import GaussianDropout
|
||||
|
@ -1,4 +1,3 @@
|
||||
from keras_core import backend
|
||||
from keras_core import constraints
|
||||
from keras_core import initializers
|
||||
from keras_core import operations as ops
|
||||
|
@ -338,6 +338,7 @@ class Layer(Operation):
|
||||
kwargs["training"] = training
|
||||
|
||||
# TODO: Populate mask argument(s)
|
||||
# if self._mask_has_training_arg():
|
||||
|
||||
# Call the layer.
|
||||
try:
|
||||
|
63
keras_core/layers/regularization/gaussian_dropout.py
Normal file
63
keras_core/layers/regularization/gaussian_dropout.py
Normal file
@ -0,0 +1,63 @@
|
||||
import math
|
||||
|
||||
from keras_core import backend
|
||||
from keras_core import layers
|
||||
from keras_core import operations as ops
|
||||
from keras_core.api_export import keras_core_export
|
||||
|
||||
|
||||
@keras_core_export("keras_core.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
    """Apply multiplicative 1-centered Gaussian noise.

    As it is a regularization layer, it is only active at training time.

    Args:
        rate: Float, drop probability (as with `Dropout`).
            The multiplicative noise will have
            standard deviation `sqrt(rate / (1 - rate))`.
        seed: Integer, optional random seed to enable deterministic behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode (adding dropout) or in inference mode (doing nothing).
    """

    def __init__(
        self, rate, noise_shape=None, seed=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        # Validate only when `rate` is a plain number; non-numeric values
        # (e.g. a tensor) pass through unchecked.
        if isinstance(rate, (int, float)) and not 0 <= rate <= 1:
            # Fixed stray `f` prefix on a piece with no placeholders (F541).
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        # NOTE(review): `noise_shape` is accepted and stored but never used in
        # `call`, and it is not serialized in `get_config` — confirm whether
        # it should be dropped or wired through.
        self.noise_shape = noise_shape
        self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True

    def call(self, inputs, training=False):
        # Active only in training mode and when there is noise to add.
        # NOTE(review): `rate == 1` passes validation but divides by zero
        # below — confirm upstream callers never pass exactly 1.
        if training and self.rate > 0:
            # Multiplicative noise ~ N(1, rate / (1 - rate)).
            stddev = math.sqrt(self.rate / (1.0 - self.rate))
            return inputs * backend.random.normal(
                shape=ops.shape(inputs),
                mean=1.0,
                stddev=stddev,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        # Noise is applied elementwise, so the output shape is unchanged.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
        }
        return {**base_config, **config}
|
29
keras_core/layers/regularization/gaussian_dropout_test.py
Normal file
29
keras_core/layers/regularization/gaussian_dropout_test.py
Normal file
@ -0,0 +1,29 @@
|
||||
import numpy as np
|
||||
|
||||
from keras_core import layers
|
||||
from keras_core import testing
|
||||
|
||||
|
||||
class GaussianDropoutTest(testing.TestCase):
    def test_gaussian_dropout_basics(self):
        # BUG FIX: this test previously exercised `layers.Dropout` instead of
        # the layer under test, `layers.GaussianDropout`.
        self.run_layer_test(
            layers.GaussianDropout,
            init_kwargs={
                "rate": 0.2,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
        )

    def test_gaussian_dropout_correctness(self):
        # With an all-ones input, the output's standard deviation should
        # match the noise std, sqrt(rate / (1 - rate)).
        inputs = np.ones((20, 500))
        layer = layers.GaussianDropout(0.3, seed=1337)
        outputs = layer(inputs, training=True)
        self.assertAllClose(
            np.std(outputs), np.sqrt(0.3 / (1 - 0.3)), atol=0.02
        )
|
Loading…
Reference in New Issue
Block a user