Add demos

commit 1a45e5cd17
parent dbda705cda
demo_custom_jax_workflow.py (new file, 112 lines)
@@ -0,0 +1,112 @@
import jax
import numpy as np

from keras_core import Model
from keras_core import backend
from keras_core import initializers
from keras_core import layers
from keras_core import operations as ops
from keras_core import optimizers


class MyDense(layers.Layer):
    def __init__(self, units, name=None):
        super().__init__(name=name)
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        w_shape = (input_dim, self.units)
        w_value = initializers.GlorotUniform()(w_shape)
        self.w = backend.Variable(w_value, name="kernel")

        b_shape = (self.units,)
        b_value = initializers.Zeros()(b_shape)
        self.b = backend.Variable(b_value, name="bias")

    def call(self, inputs):
        return ops.matmul(inputs, self.w) + self.b


class MyModel(Model):
    def __init__(self, hidden_dim, output_dim):
        super().__init__()
        self.dense1 = MyDense(hidden_dim)
        self.dense2 = MyDense(hidden_dim)
        self.dense3 = MyDense(output_dim)

    def call(self, x):
        x = self.dense1(x)
        x = self.dense2(x)
        return self.dense3(x)


def Dataset():
    for _ in range(20):
        yield (np.random.random((32, 128)), np.random.random((32, 16)))


def loss_fn(y_true, y_pred):
    return ops.sum((y_true - y_pred) ** 2)


model = MyModel(hidden_dim=256, output_dim=16)

optimizer = optimizers.SGD(learning_rate=0.0001)
dataset = Dataset()

# Build model
x = ops.convert_to_tensor(np.random.random((1, 128)))
model(x)
# Build optimizer
optimizer.build(model.trainable_variables)


def compute_loss_and_updates(
    trainable_variables, non_trainable_variables, x, y
):
    y_pred, non_trainable_variables = model.stateless_call(
        trainable_variables, non_trainable_variables, x
    )
    loss = loss_fn(y, y_pred)
    return loss, non_trainable_variables


grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)


@jax.jit
def train_step(state, data):
    trainable_variables, non_trainable_variables, optimizer_variables = state
    x, y = data
    (loss, non_trainable_variables), grads = grad_fn(
        trainable_variables, non_trainable_variables, x, y
    )
    trainable_variables, optimizer_variables = optimizer.stateless_apply(
        grads, trainable_variables, optimizer_variables
    )
    # Return updated state
    return loss, (
        trainable_variables,
        non_trainable_variables,
        optimizer_variables,
    )


trainable_variables = model.trainable_variables
non_trainable_variables = model.non_trainable_variables
optimizer_variables = optimizer.variables
state = trainable_variables, non_trainable_variables, optimizer_variables
# Training loop
for data in dataset:
    loss, state = train_step(state, data)
    print("Loss:", loss)

# Post-processing model state update.
# Unpack the final state so the model receives the trained values,
# not the stale initial ones captured before the loop.
trainable_variables, non_trainable_variables, optimizer_variables = state
for variable, value in zip(model.trainable_variables, trainable_variables):
    variable.assign(value)
for variable, value in zip(
    model.non_trainable_variables, non_trainable_variables
):
    variable.assign(value)
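The same stateless machinery also covers inference. A minimal sketch, reusing model, loss_fn, Dataset, and the final state from the demo above; the eval_step helper is illustrative only and not part of this commit:

@jax.jit
def eval_step(state, data):
    # Forward pass only: reuse the trained variables without updating them.
    trainable_variables, non_trainable_variables, _ = state
    x, y = data
    y_pred, _ = model.stateless_call(
        trainable_variables, non_trainable_variables, x
    )
    return loss_fn(y, y_pred)


for data in Dataset():
    print("Eval loss:", float(eval_step(state, data)))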
demo_custom_layer_backend_agnostic.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import numpy as np

from keras_core import Model
from keras_core import backend
from keras_core import initializers
from keras_core import layers
from keras_core import losses
from keras_core import metrics
from keras_core import operations as ops
from keras_core import optimizers


class MyDense(layers.Layer):
    def __init__(self, units, name=None):
        super().__init__(name=name)
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        w_shape = (input_dim, self.units)
        w_value = initializers.GlorotUniform()(w_shape)
        self.w = backend.Variable(w_value, name="kernel")

        b_shape = (self.units,)
        b_value = initializers.Zeros()(b_shape)
        self.b = backend.Variable(b_value, name="bias")

    def call(self, inputs):
        # Use Keras ops to create backend-agnostic layers/metrics/etc.
        return ops.matmul(inputs, self.w) + self.b


class MyModel(Model):
    def __init__(self, hidden_dim, output_dim):
        super().__init__()
        self.dense1 = MyDense(hidden_dim)
        self.dense2 = MyDense(hidden_dim)
        self.dense3 = MyDense(output_dim)

    def call(self, x):
        # Two parallel branches over the same input, concatenated below.
        x1 = self.dense1(x)
        x2 = self.dense2(x)
        # Why not use some ops here as well?
        x = ops.concatenate([x1, x2], axis=-1)
        return self.dense3(x)


model = MyModel(hidden_dim=256, output_dim=16)
model.summary()

x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 10

model.compile(
    optimizer=optimizers.SGD(learning_rate=0.001),
    loss=losses.MeanSquaredError(),
    metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)

print("History:")
print(history.history)
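Because every tensor operation above goes through keras_core ops, the script is not tied to a single backend. A short sketch of how one might confirm the active backend at runtime; it assumes keras_core.backend exposes a backend() query and honors the KERAS_BACKEND environment variable, neither of which is shown in this commit:

# Assumed usage, not part of this commit:
#   KERAS_BACKEND=jax python demo_custom_layer_backend_agnostic.py
from keras_core import backend

print("Active backend:", backend.backend())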
demo_custom_tf_workflow.py (new file, 78 lines)
@@ -0,0 +1,78 @@
import numpy as np
import tensorflow as tf

from keras_core import Model
from keras_core import backend
from keras_core import initializers
from keras_core import layers
from keras_core import operations as ops
from keras_core import optimizers


class MyDense(layers.Layer):
    def __init__(self, units, name=None):
        super().__init__(name=name)
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        w_shape = (input_dim, self.units)
        w_value = initializers.GlorotUniform()(w_shape)
        self.w = backend.Variable(w_value, name="kernel")

        b_shape = (self.units,)
        b_value = initializers.Zeros()(b_shape)
        self.b = backend.Variable(b_value, name="bias")

    def call(self, inputs):
        return ops.matmul(inputs, self.w) + self.b


class MyModel(Model):
    def __init__(self, hidden_dim, output_dim):
        super().__init__()
        self.dense1 = MyDense(hidden_dim)
        self.dense2 = MyDense(hidden_dim)
        self.dense3 = MyDense(output_dim)

    def call(self, x):
        x = self.dense1(x)
        x = self.dense2(x)
        return self.dense3(x)


def Dataset():
    for _ in range(20):
        yield (
            np.random.random((32, 128)).astype("float32"),
            np.random.random((32, 16)).astype("float32"),
        )


def loss_fn(y_true, y_pred):
    return ops.sum((y_true - y_pred) ** 2)


model = MyModel(hidden_dim=256, output_dim=16)

optimizer = optimizers.SGD(learning_rate=0.0001)
dataset = Dataset()


@tf.function(jit_compile=True)
def train_step(data):
    x, y = data
    with tf.GradientTape() as tape:
        y_pred = model(x)
        loss = loss_fn(y, y_pred)
    # !! Glitch to be resolved !! Gradients are taken with respect to the
    # unwrapped v.value objects rather than the Keras variables themselves.
    gradients = tape.gradient(
        loss, [v.value for v in model.trainable_variables]
    )
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss


for data in dataset:
    loss = train_step(data)
    print("Loss:", float(loss))
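The compiled-function pattern extends naturally to evaluation. A minimal sketch reusing model, loss_fn, and Dataset from the demo above; the eval_step helper is illustrative and not part of the commit:

@tf.function(jit_compile=True)
def eval_step(data):
    # Forward pass only, so no GradientTape is needed.
    x, y = data
    y_pred = model(x)
    return loss_fn(y, y_pred)


for data in Dataset():
    print("Eval loss:", float(eval_step(data)))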
demo_functional.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import numpy as np

from keras_core import layers
from keras_core import losses
from keras_core import metrics
from keras_core import optimizers
from keras_core.models import Functional

inputs = layers.Input((128,), batch_size=32)
x = layers.Dense(256)(inputs)
x = layers.Dense(256)(x)
x = layers.Dense(256)(x)
outputs = layers.Dense(16)(x)
model = Functional(inputs, outputs)
model.summary()

x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 10

model.compile(
    optimizer=optimizers.SGD(learning_rate=0.001),
    loss=losses.MeanSquaredError(),
    metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)

print("History:")
print(history.history)
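After fit, the compiled loss and metrics can be checked on held-out data. A short usage sketch, assuming model.evaluate follows the familiar Keras signature; the test arrays are made up for illustration:

x_test = np.random.random((1000, 128))
y_test = np.random.random((1000, 16))
# Returns the loss followed by the compiled metrics.
results = model.evaluate(x_test, y_test, batch_size=batch_size)
print("Eval results:", results)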
demo_subclass.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import numpy as np

from keras_core import Model
from keras_core import layers
from keras_core import losses
from keras_core import metrics
from keras_core import optimizers


class MyModel(Model):
    def __init__(self, hidden_dim, output_dim):
        super().__init__()
        self.dense1 = layers.Dense(hidden_dim)
        self.dense2 = layers.Dense(hidden_dim)
        self.dense3 = layers.Dense(output_dim)

    def call(self, x):
        x = self.dense1(x)
        x = self.dense2(x)
        return self.dense3(x)


model = MyModel(hidden_dim=256, output_dim=16)
model.summary()

x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 10

model.compile(
    optimizer=optimizers.SGD(learning_rate=0.001),
    loss=losses.MeanSquaredError(),
    metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)

print("History:")
print(history.history)
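For completeness, a brief inference sketch for the subclassed model, assuming model.predict follows the familiar Keras signature:

preds = model.predict(x[:batch_size], batch_size=batch_size)
print("Predictions shape:", preds.shape)  # expected: (32, 16)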
@@ -1,2 +1,5 @@
 from keras_core import backend
 from keras_core import layers
+from keras_core import models
+from keras_core import operations
+from keras_core.models import Model
@@ -273,7 +273,8 @@ class Layer(Operation):
                 raise ValueError(
                     "Only input tensors may be passed as "
                     "positional arguments. The following argument value "
-                    f"should be passed as a keyword argument: {arg}"
+                    f"should be passed as a keyword argument: {arg} "
+                    f"(of type {type(arg)})"
                 )

         # 4. Check input spec for 1st positional arg.
@@ -420,7 +421,7 @@ class Layer(Operation):
                 losses.extend(layer._losses)
         weight_regularization_losses = []
         for v in self.trainable_weights:
-            regularizer = getattr(v, "regularizer")
+            regularizer = getattr(v, "regularizer", None)
             if regularizer:
                 weight_regularization_losses.append(regularizer(v))
         losses.extend(weight_regularization_losses)
@@ -552,9 +553,11 @@ class Layer(Operation):
             if id(layer) in seen_object_ids:
                 continue
             seen_object_ids.add(id(layer))
             layers.append(layer)
-            deque.extendleft(layer._layers)
+            # Introspect recursively through sublayers.
+            if recursive:
+                deque.extendleft(layer._layers)
         return layers


 def get_arguments_dict(fn, *args, **kwargs):
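The second hunk is the behavioral fix in this file: getattr without a default raises AttributeError for any variable created without a regularizer attribute, while the new form degrades to None. A standalone illustration:

class FakeVariable:
    pass  # no regularizer attribute set


v = FakeVariable()
print(getattr(v, "regularizer", None))  # None, safe to test for truthiness
print(getattr(v, "regularizer"))        # raises AttributeError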