Add ActivityRegularization layer

This commit is contained in:
Francois Chollet 2023-04-26 16:19:17 -07:00
parent d03973f1da
commit 4ed93c861e
5 changed files with 77 additions and 1 deletions

@@ -3,4 +3,7 @@ from keras_core.layers.core.dense import Dense
from keras_core.layers.core.input_layer import Input
from keras_core.layers.core.input_layer import InputLayer
from keras_core.layers.layer import Layer
from keras_core.layers.regularization.activity_regularization import (
    ActivityRegularization,
)
from keras_core.layers.regularization.dropout import Dropout

@@ -343,7 +343,9 @@ class Layer(Operation):
        outputs = super().__call__(*args, **kwargs)
        # Record activity regularizer loss.
        if self.activity_regularizer is not None:
            for output in nest.flatten(outputs):
                if backend.is_tensor(output):
                    self.add_loss(self.activity_regularizer(output))
        # TODO: Set masks on outputs
        # self._set_mask_metadata(inputs, outputs, previous_mask)

@@ -149,6 +149,11 @@ class LayerTest(testing.TestCase):
        self.assertLen(layer.losses, 1)
        self.assertAllClose(layer.losses[0], 0.01)

        # KerasTensors are no op
        layer = ActivityRegularizer(activity_regularizer="l1")
        layer(layers.Input((2, 2)))
        self.assertLen(layer.losses, 0)

    def test_add_loss(self):
        class LossLayer(layers.Layer):
            def call(self, x):

@@ -0,0 +1,40 @@
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.ActivityRegularization")
class ActivityRegularization(Layer):
    """Layer that applies an update to the cost function based on the
    input activity.

    The layer itself is an identity op; the regularization loss comes
    from the `L1L2` activity regularizer it installs on itself.

    Args:
        l1: L1 regularization factor (positive float).
        l2: L2 regularization factor (positive float).

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as input.
    """

    def __init__(self, l1=0.0, l2=0.0, **kwargs):
        # The base Layer machinery applies `activity_regularizer` to the
        # outputs and records the resulting loss.
        super().__init__(
            activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
        )
        self.supports_masking = True
        # Kept only so that `get_config()` can round-trip the factors.
        self.l1 = l1
        self.l2 = l2

    def call(self, inputs):
        # Identity: the loss is contributed via the activity regularizer.
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        config = super().get_config()
        config.update({"l1": self.l1, "l2": self.l2})
        return config

@@ -0,0 +1,26 @@
import numpy as np
from keras_core import layers
from keras_core.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
    def test_correctness(self):
        # For a single activation x = 2 the L1L2 loss is
        # l1 * |x| + l2 * x**2.
        l1_factor = 0.2
        l2_factor = 0.3
        reg_layer = layers.ActivityRegularization(l1=l1_factor, l2=l2_factor)
        reg_layer(2 * np.ones((1,)))
        self.assertLen(reg_layer.losses, 1)
        expected_loss = l1_factor * 2 + l2_factor * 4
        self.assertAllClose(reg_layer.losses[0], expected_loss)

    def test_basics(self):
        # Generic layer contract checks (config round-trip, shapes,
        # weight/loss counts, masking support).
        init_kwargs = {"l1": 0.1, "l2": 0.2}
        self.run_layer_test(
            layers.ActivityRegularization,
            init_kwargs,
            input_shape=(2, 3),
            input_dtype="float32",
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=1,
            supports_masking=True,
        )