from keras_core import operations as ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer


@keras_core_export(["keras_core.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
|
|
|
|
Adamax, a variant of Adam based on the infinity norm, is a first-order
|
|
gradient-based optimization method. Due to its capability of adjusting the
|
|
learning rate based on data characteristics, it is suited to learn
|
|
time-variant process, e.g., speech data with dynamically changed noise
|
|
conditions. Default parameters follow those provided in the paper (see
|
|
references below).
|
|
|
|

    Initialization:

    ```python
    m = 0  # Initialize the 1st moment vector
    u = 0  # Initialize the exponentially weighted infinity norm
    t = 0  # Initialize timestep
    ```

    The update rule for parameter `w` with gradient `g` is described at the
    end of section 7.1 of the paper (see the reference section):

    ```python
    t += 1
    m = beta1 * m + (1 - beta1) * g
    u = max(beta2 * u, abs(g))
    current_lr = learning_rate / (1 - beta1 ** t)
    w = w - current_lr * m / (u + epsilon)
    ```
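
    Note that `update_step` below folds the bias correction
    `1 / (1 - beta1 ** t)` into the denominator, which is algebraically
    equivalent to the pseudocode above:

    ```python
    w = w - lr * m / ((1 - beta1 ** t) * (u + epsilon))
    ```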

    Args:
        learning_rate: A floating point value, or a callable
            that takes no arguments and returns the actual value to use.
            The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor. The exponential
            decay rate for the 1st moment estimates.
        beta_2: A float value or a constant float tensor. The exponential
            decay rate for the exponentially weighted infinity norm.
        epsilon: A small constant for numerical stability.
        {{base_optimizer_keyword_args}}
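
    Usage (a minimal sketch; `model` stands for any compiled-ready
    `keras_core` model and is not defined here):

    ```python
    optimizer = keras_core.optimizers.Adamax(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss="mse")
    ```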

    Reference:

    - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        name="adamax",
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
        )
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon

    def build(self, var_list):
        """Initialize optimizer variables.

        Adamax optimizer has 2 types of variables: momentums (denoted as
        `m`) and the exponentially weighted infinity norm (denoted as `u`).

        Args:
            var_list: list of model variables to build Adamax variables on.
        """
        if self.built:
            return
        super().build(var_list)
        self._m = []
        self._u = []
        # Create one m and one u slot per model variable, in var_list
        # order, so they can be looked up by variable index later.
        for var in var_list:
            self._m.append(
                self.add_variable_from_reference(
                    reference_variable=var, name="m"
                )
            )
            self._u.append(
                self.add_variable_from_reference(
                    reference_variable=var, name="u"
                )
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)
        local_step = ops.cast(self.iterations + 1, variable.dtype)
        # beta_1 ** t, used to bias-correct the first moment below.
        beta_1_power = ops.power(
            ops.cast(self.beta_1, variable.dtype), local_step
        )

        # Fetch the m and u slots created for this variable in `build()`.
        m = self._m[self._get_variable_index(variable)]
        u = self._u[self._get_variable_index(variable)]

        # m <- beta_1 * m + (1 - beta_1) * g, written in incremental form.
        m.assign(m + (gradient - m) * (1 - self.beta_1))
        # u <- max(beta_2 * u, |g|): the exponentially weighted infinity
        # norm.
        u.assign(ops.maximum(self.beta_2 * u, ops.abs(gradient)))
        # Bias-corrected update:
        # w <- w - lr * m / ((1 - beta_1 ** t) * (u + eps)).
        variable.assign(
            variable - (lr * m) / ((1 - beta_1_power) * (u + self.epsilon))
        )
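        # For example, on the first step (t = 1) with default
        # hyperparameters and g = 1: m = 0.1, u = 1, beta_1_power = 0.9,
        # so the parameter moves by lr * 0.1 / (0.1 * (1 + eps)) ~= lr.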

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "beta_1": self.beta_1,
                "beta_2": self.beta_2,
                "epsilon": self.epsilon,
            }
        )
        return config


Adamax.__doc__ = Adamax.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)