diff --git a/README.md b/README.md index 7d8bb0e5c..87e42b326 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ -[![](https://github.com/keras-team/keras-core/workflows/Tests/badge.svg?branch=main)](https://github.com/keras-team/keras-core/actions?query=workflow%3ATests+branch%3Amain) -[![](https://badge.fury.io/py/keras-core.svg)](https://badge.fury.io/py/keras-core) +[![](https://github.com/keras-team/keras/workflows/Tests/badge.svg?branch=main)](https://github.com/keras-team/keras/actions?query=workflow%3ATests+branch%3Amain) +[![](https://badge.fury.io/py/keras.svg)](https://badge.fury.io/py/keras) -# Keras Core: A new multi-backend Keras +# Keras 3: A new multi-backend Keras -Keras Core is a new multi-backend implementation of the Keras API, with support for TensorFlow, JAX, and PyTorch. +Keras 3 is a new multi-backend implementation of the Keras API, with support for TensorFlow, JAX, and PyTorch. **WARNING:** At this time, this package is experimental. It has rough edges and not everything might work as expected. @@ -13,7 +13,7 @@ Once ready, this package will become Keras 3.0 and subsume `tf.keras`. ## Local installation -Keras Core is compatible with Linux and MacOS systems. To install a local development version: +Keras 3 is compatible with Linux and MacOS systems. To install a local development version: 1. Install dependencies: @@ -28,7 +28,7 @@ python pip_build.py --install ``` You should also install your backend of choice: `tensorflow`, `jax`, or `torch`. -Note that `tensorflow` is required for using certain Keras Core features: certain preprocessing layers as +Note that `tensorflow` is required for using certain Keras 3 features: certain preprocessing layers as well as `tf.data` pipelines. 
## Configuring your backend @@ -46,16 +46,16 @@ In Colab, you can do: import os os.environ["KERAS_BACKEND"] = "jax" -import keras_core as keras +import keras as keras ``` -**Note:** The backend must be configured before importing `keras_core`, and the backend cannot be changed after +**Note:** The backend must be configured before importing `keras`, and the backend cannot be changed after the package has been imported. ## Backwards compatibility -Keras Core is intended to work as a drop-in replacement for `tf.keras` (when using the TensorFlow backend). Just take your -existing `tf.keras` code, change the `keras` imports to `keras_core`, make sure that your calls to `model.save()` are using +Keras 3 is intended to work as a drop-in replacement for `tf.keras` (when using the TensorFlow backend). Just take your +existing `tf.keras` code, change the `keras` imports to `keras`, make sure that your calls to `model.save()` are using the up-to-date `.keras` format, and you're done. If your `tf.keras` model does not include custom components, you can start running it on top of JAX or PyTorch immediately. @@ -66,7 +66,7 @@ to a backend-agnostic implementation in just a few minutes. In addition, Keras models can consume datasets in any format, regardless of the backend you're using: you can train your models with your existing `tf.data.Dataset` pipelines or PyTorch `DataLoaders`. -## Why use Keras Core? +## Why use Keras 3? - Run your high-level Keras workflows on top of any framework -- benefiting at will from the advantages of each framework, e.g. the scalability and performance of JAX or the production ecosystem options of TensorFlow. 
diff --git a/benchmarks/layer_benchmark/README.md b/benchmarks/layer_benchmark/README.md index 9b96f9de1..6ca51d1fd 100644 --- a/benchmarks/layer_benchmark/README.md +++ b/benchmarks/layer_benchmark/README.md @@ -1,7 +1,7 @@ # Benchmark the layer performance This directory contains benchmarks to compare the performance of -`keras_core.layers.XXX` and `tf.keras.layers.XXX`. We compare the performance of +`keras.layers.XXX` and `tf.keras.layers.XXX`. We compare the performance of both the forward pass and train step (forward & backward pass). To run the benchmark, use the command below and change the flags according to diff --git a/benchmarks/layer_benchmark/base_benchmark.py b/benchmarks/layer_benchmark/base_benchmark.py index ed65f0b7c..42aac4488 100644 --- a/benchmarks/layer_benchmark/base_benchmark.py +++ b/benchmarks/layer_benchmark/base_benchmark.py @@ -4,7 +4,7 @@ import numpy as np import tensorflow as tf from absl import flags -import keras_core +import keras FLAGS = flags.FLAGS @@ -66,7 +66,7 @@ class BenchmarkMetricsCallback: self.state["throughput"] = throughput -class KerasCoreBenchmarkMetricsCallback(keras_core.callbacks.Callback): +class KerasCoreBenchmarkMetricsCallback(keras.callbacks.Callback): def __init__(self, start_batch=1, stop_batch=None): self._callback = BenchmarkMetricsCallback(start_batch, stop_batch) @@ -108,21 +108,21 @@ class LayerBenchmark: input_shape, flat_call_inputs=True, jit_compile=True, - keras_core_layer=None, + keras_layer=None, tf_keras_layer=None, ): self.layer_name = layer_name - _keras_core_layer_class = getattr(keras_core.layers, layer_name) + _keras_layer_class = getattr(keras.layers, layer_name) _tf_keras_layer_class = getattr(tf.keras.layers, layer_name) - if keras_core_layer is None: - # Sometimes you want to initialize the keras_core layer and tf_keras + if keras_layer is None: + # Sometimes you want to initialize the keras layer and tf_keras # layer in a different way. 
For example, `Bidirectional` layer, - # which takes in `keras_core.layers.Layer` and + # which takes in `keras.layers.Layer` and # `tf.keras.layer.Layer` separately. - self._keras_core_layer = _keras_core_layer_class(**init_args) + self._keras_layer = _keras_layer_class(**init_args) else: - self._keras_core_layer = keras_core_layer + self._keras_layer = keras_layer if tf_keras_layer is None: self._tf_keras_layer = _tf_keras_layer_class(**init_args) @@ -130,14 +130,14 @@ class LayerBenchmark: self._tf_keras_layer = tf_keras_layer self.input_shape = input_shape - self._keras_core_model = self._build_keras_core_model( + self._keras_model = self._build_keras_model( input_shape, flat_call_inputs ) self._tf_keras_model = self._build_tf_keras_model( input_shape, flat_call_inputs ) - self._keras_core_model.compile( + self._keras_model.compile( loss="mse", optimizer="sgd", jit_compile=jit_compile ) self._tf_keras_model.compile( @@ -148,19 +148,19 @@ class LayerBenchmark: self.jit_compile = jit_compile self.input_shape = input_shape - def _build_keras_core_model(self, input_shape, flat_call_inputs=True): + def _build_keras_model(self, input_shape, flat_call_inputs=True): inputs = [] if not isinstance(input_shape[0], (tuple, list)): input_shape = [input_shape] for shape in input_shape: - inputs.append(keras_core.Input(shape=shape)) + inputs.append(keras.Input(shape=shape)) if flat_call_inputs: - outputs = self._keras_core_layer(*inputs) + outputs = self._keras_layer(*inputs) else: - outputs = self._keras_core_layer(inputs) - return keras_core.Model(inputs=inputs, outputs=outputs) + outputs = self._keras_layer(inputs) + return keras.Model(inputs=inputs, outputs=outputs) def _build_tf_keras_model(self, input_shape, flat_call_inputs=True): inputs = [] @@ -195,7 +195,7 @@ class LayerBenchmark: stop_batch=num_iterations ) - self._keras_core_model.predict( + self._keras_model.predict( data, batch_size=batch_size, callbacks=[callback], @@ -207,15 +207,15 @@ class LayerBenchmark: 
callbacks=[tf_keras_callback], ) - keras_core_throughput = ( + keras_throughput = ( callback._callback.state["throughput"] * batch_size ) tf_keras_throughput = ( tf_keras_callback._callback.state["throughput"] * batch_size ) print( - f"Keras Core throughput of forward pass of {self.layer_name}: " - f"{keras_core_throughput:.2f} samples/sec." + f"Keras 3 throughput of forward pass of {self.layer_name}: " + f"{keras_throughput:.2f} samples/sec." ) print( f"TF Keras throughput of forward pass of {self.layer_name}: " @@ -240,15 +240,15 @@ class LayerBenchmark: if self.flat_call_inputs: # Scale by a small factor to avoid zero gradients. label = ( - keras_core.backend.convert_to_numpy( - self._keras_core_layer(*data) + keras.backend.convert_to_numpy( + self._keras_layer(*data) ) * 1.001 ) else: label = ( - keras_core.backend.convert_to_numpy( - self._keras_core_layer(data) + keras.backend.convert_to_numpy( + self._keras_layer(data) ) * 1.001 ) @@ -259,7 +259,7 @@ class LayerBenchmark: stop_batch=num_iterations ) - self._keras_core_model.fit( + self._keras_model.fit( data, label, batch_size=batch_size, @@ -272,15 +272,15 @@ class LayerBenchmark: callbacks=[tf_keras_callback], ) - keras_core_throughput = ( + keras_throughput = ( callback._callback.state["throughput"] * batch_size ) tf_keras_throughput = ( tf_keras_callback._callback.state["throughput"] * batch_size ) print( - f"Keras Core throughput of forward & backward pass of " - f"{self.layer_name}: {keras_core_throughput:.2f} samples/sec." + f"Keras 3 throughput of forward & backward pass of " + f"{self.layer_name}: {keras_throughput:.2f} samples/sec." 
) print( f"TF Keras throughput of forward & backward pass of " diff --git a/benchmarks/layer_benchmark/rnn_benchmark.py b/benchmarks/layer_benchmark/rnn_benchmark.py index 8877e19cc..a34d40c2e 100644 --- a/benchmarks/layer_benchmark/rnn_benchmark.py +++ b/benchmarks/layer_benchmark/rnn_benchmark.py @@ -16,7 +16,7 @@ import tensorflow as tf from absl import app from absl import flags -import keras_core +import keras from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark FLAGS = flags.FLAGS @@ -194,8 +194,8 @@ def benchmark_bidirectional( ): layer_name = "Bidirectional" init_args = {} - keras_core_layer = keras_core.layers.Bidirectional( - keras_core.layers.LSTM(32) + keras_layer = keras.layers.Bidirectional( + keras.layers.LSTM(32) ) tf_keras_layer = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)) benchmark = LayerBenchmark( @@ -203,7 +203,7 @@ def benchmark_bidirectional( init_args, input_shape=[256, 256], jit_compile=jit_compile, - keras_core_layer=keras_core_layer, + keras_layer=keras_layer, tf_keras_layer=tf_keras_layer, ) @@ -225,8 +225,8 @@ def benchmark_time_distributed( ): layer_name = "TimeDistributed" init_args = {} - keras_core_layer = keras_core.layers.TimeDistributed( - keras_core.layers.Conv2D(16, (3, 3)) + keras_layer = keras.layers.TimeDistributed( + keras.layers.Conv2D(16, (3, 3)) ) tf_keras_layer = tf.keras.layers.TimeDistributed( tf.keras.layers.Conv2D(16, (3, 3)) @@ -236,7 +236,7 @@ def benchmark_time_distributed( init_args, input_shape=[10, 32, 32, 3], jit_compile=jit_compile, - keras_core_layer=keras_core_layer, + keras_layer=keras_layer, tf_keras_layer=tf_keras_layer, ) diff --git a/benchmarks/model_benchmark/benchmark_utils.py b/benchmarks/model_benchmark/benchmark_utils.py index 51bd52266..dafba9205 100644 --- a/benchmarks/model_benchmark/benchmark_utils.py +++ b/benchmarks/model_benchmark/benchmark_utils.py @@ -1,9 +1,9 @@ import time -import keras_core +import keras -class 
BenchmarkMetricsCallback(keras_core.callbacks.Callback): +class BenchmarkMetricsCallback(keras.callbacks.Callback): def __init__(self, start_batch=1, stop_batch=None): self.start_batch = start_batch self.stop_batch = stop_batch diff --git a/benchmarks/model_benchmark/bert_benchmark.py b/benchmarks/model_benchmark/bert_benchmark.py index 4d7b2a3df..c73737203 100644 --- a/benchmarks/model_benchmark/bert_benchmark.py +++ b/benchmarks/model_benchmark/bert_benchmark.py @@ -21,7 +21,7 @@ from absl import flags from absl import logging from model_benchmark.benchmark_utils import BenchmarkMetricsCallback -import keras_core as keras +import keras as keras flags.DEFINE_string("model_size", "small", "The size of model to benchmark.") flags.DEFINE_string( diff --git a/benchmarks/model_benchmark/image_classification_benchmark.py b/benchmarks/model_benchmark/image_classification_benchmark.py index ddb1a330e..b99b98c3c 100644 --- a/benchmarks/model_benchmark/image_classification_benchmark.py +++ b/benchmarks/model_benchmark/image_classification_benchmark.py @@ -27,7 +27,7 @@ from absl import flags from absl import logging from model_benchmark.benchmark_utils import BenchmarkMetricsCallback -import keras_core as keras +import keras as keras flags.DEFINE_string("model", "EfficientNetV2B0", "The model to benchmark.") flags.DEFINE_integer("epochs", 1, "The number of epochs.") diff --git a/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py b/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py index b46162ffe..013cf8025 100644 --- a/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py +++ b/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py @@ -9,9 +9,9 @@ import torch import torch.nn as nn import torch.optim as optim -import keras_core +import keras from benchmarks.torch_ctl_benchmark.benchmark_utils import train_loop -from keras_core import layers +from keras import layers num_classes = 2 input_shape = (3, 256, 256) @@ -55,8 +55,8 @@ class TorchModel(torch.nn.Module): 
return x -def run_keras_core_custom_training_loop(): - keras_model = keras_core.Sequential( +def run_keras_custom_training_loop(): + keras_model = keras.Sequential( [ layers.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), @@ -74,7 +74,7 @@ def run_keras_core_custom_training_loop(): num_epochs=num_epochs, optimizer=optimizer, loss_fn=loss_fn, - framework="keras_core", + framework="keras", ) @@ -93,5 +93,5 @@ def run_torch_custom_training_loop(): if __name__ == "__main__": - run_keras_core_custom_training_loop() + run_keras_custom_training_loop() run_torch_custom_training_loop() diff --git a/benchmarks/torch_ctl_benchmark/dense_model_benchmark.py b/benchmarks/torch_ctl_benchmark/dense_model_benchmark.py index afc8af925..495ea633c 100644 --- a/benchmarks/torch_ctl_benchmark/dense_model_benchmark.py +++ b/benchmarks/torch_ctl_benchmark/dense_model_benchmark.py @@ -9,9 +9,9 @@ import torch import torch.nn as nn import torch.optim as optim -import keras_core +import keras from benchmarks.torch_ctl_benchmark.benchmark_utils import train_loop -from keras_core import layers +from keras import layers num_classes = 2 input_shape = (8192,) @@ -55,8 +55,8 @@ class TorchModel(torch.nn.Module): return x -def run_keras_core_custom_training_loop(): - keras_model = keras_core.Sequential( +def run_keras_custom_training_loop(): + keras_model = keras.Sequential( [ layers.Input(shape=input_shape), layers.Dense(64, activation="relu"), @@ -73,7 +73,7 @@ def run_keras_core_custom_training_loop(): num_epochs=num_epochs, optimizer=optimizer, loss_fn=loss_fn, - framework="keras_core", + framework="keras", ) @@ -92,5 +92,5 @@ def run_torch_custom_training_loop(): if __name__ == "__main__": - run_keras_core_custom_training_loop() + run_keras_custom_training_loop() run_torch_custom_training_loop() diff --git a/codecov.yml b/codecov.yml index 63ad8099c..d6453385f 100644 --- a/codecov.yml +++ b/codecov.yml @@ -26,10 +26,10 @@ flag_management: - type: patch 
target: auto individual_flags: - - name: keras_core + - name: keras paths: - - keras_core - - name: keras_core.applications + - keras + - name: keras.applications paths: - - keras_core/applications + - keras/applications carryforward: true diff --git a/conftest.py b/conftest.py index 25cbb4398..d139e8875 100644 --- a/conftest.py +++ b/conftest.py @@ -8,7 +8,7 @@ except ImportError: import pytest -from keras_core.backend import backend +from keras.backend import backend def pytest_configure(config): diff --git a/examples/demo_custom_jax_workflow.py b/examples/demo_custom_jax_workflow.py index 9f18ddf7f..196262888 100644 --- a/examples/demo_custom_jax_workflow.py +++ b/examples/demo_custom_jax_workflow.py @@ -7,12 +7,12 @@ os.environ["KERAS_BACKEND"] = "jax" import jax import numpy as np -from keras_core import Model -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core import ops -from keras_core import optimizers +from keras import Model +from keras import backend +from keras import initializers +from keras import layers +from keras import ops +from keras import optimizers class MyDense(layers.Layer): diff --git a/examples/demo_custom_layer_backend_agnostic.py b/examples/demo_custom_layer_backend_agnostic.py index 784c70c6e..1b24aa592 100644 --- a/examples/demo_custom_layer_backend_agnostic.py +++ b/examples/demo_custom_layer_backend_agnostic.py @@ -1,13 +1,13 @@ import numpy as np -import keras_core -from keras_core import Model -from keras_core import initializers -from keras_core import layers -from keras_core import losses -from keras_core import metrics -from keras_core import ops -from keras_core import optimizers +import keras +from keras import Model +from keras import initializers +from keras import layers +from keras import losses +from keras import metrics +from keras import ops +from keras import optimizers class MyDense(layers.Layer): @@ -43,11 +43,11 @@ class MyDropout(layers.Layer): # Use 
seed_generator for managing RNG state. # It is a state element and its seed variable is # tracked as part of `layer.variables`. - self.seed_generator = keras_core.random.SeedGenerator(1337) + self.seed_generator = keras.random.SeedGenerator(1337) def call(self, inputs): - # Use `keras_core.random` for random ops. - return keras_core.random.dropout( + # Use `keras.random` for random ops. + return keras.random.dropout( inputs, self.rate, seed=self.seed_generator ) diff --git a/examples/demo_custom_tf_workflow.py b/examples/demo_custom_tf_workflow.py index 1082eafc5..b8fc2b7b6 100644 --- a/examples/demo_custom_tf_workflow.py +++ b/examples/demo_custom_tf_workflow.py @@ -7,12 +7,12 @@ os.environ["KERAS_BACKEND"] = "tensorflow" import numpy as np import tensorflow as tf -from keras_core import Model -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core import ops -from keras_core import optimizers +from keras import Model +from keras import backend +from keras import initializers +from keras import layers +from keras import ops +from keras import optimizers class MyDense(layers.Layer): diff --git a/examples/demo_custom_torch_workflow.py b/examples/demo_custom_torch_workflow.py index e7d971a32..56f5f3065 100644 --- a/examples/demo_custom_torch_workflow.py +++ b/examples/demo_custom_torch_workflow.py @@ -7,8 +7,8 @@ os.environ["KERAS_BACKEND"] = "torch" import torch import torch.nn as nn import torch.optim as optim -from keras_core import layers -import keras_core +from keras import layers +import keras import numpy as np # Model / data parameters @@ -19,7 +19,7 @@ batch_size = 64 num_epochs = 1 # Load the data and split it between train and test sets -(x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data() +(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 @@ -32,7 +32,7 @@ 
print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # Create the Keras model -model = keras_core.Sequential( +model = keras.Sequential( [ layers.Input(shape=(28, 28, 1)), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), @@ -102,7 +102,7 @@ train(model, train_loader, num_epochs, optimizer, loss_fn) class MyModel(nn.Module): def __init__(self): super().__init__() - self.model = keras_core.Sequential( + self.model = keras.Sequential( [ layers.Input(shape=(28, 28, 1)), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), diff --git a/examples/demo_jax_distributed.py b/examples/demo_jax_distributed.py index ae7e2bde8..88ebe478f 100644 --- a/examples/demo_jax_distributed.py +++ b/examples/demo_jax_distributed.py @@ -11,7 +11,7 @@ pp = pprint.PrettyPrinter() import jax import jax.numpy as jnp import tensorflow as tf # just for tf.data -import keras_core as keras # Keras multi-backend +import keras as keras # Keras multi-backend import numpy as np from tqdm import tqdm diff --git a/examples/demo_mnist_convnet.py b/examples/demo_mnist_convnet.py index f5f4e3f4d..ce08b2b92 100644 --- a/examples/demo_mnist_convnet.py +++ b/examples/demo_mnist_convnet.py @@ -1,14 +1,14 @@ import numpy as np -import keras_core -from keras_core import layers -from keras_core.utils import to_categorical +import keras +from keras import layers +from keras.utils import to_categorical # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # Load the data and split it between train and test sets -(x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data() +(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 @@ -28,7 +28,7 @@ y_test = to_categorical(y_test, num_classes) batch_size = 128 epochs = 3 -model = keras_core.Sequential( +model = keras.Sequential( [ layers.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), 
activation="relu"), diff --git a/examples/demo_subclass.py b/examples/demo_subclass.py index 0283ee0ea..ea22f063a 100644 --- a/examples/demo_subclass.py +++ b/examples/demo_subclass.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core import Model -from keras_core import layers -from keras_core import losses -from keras_core import metrics -from keras_core import optimizers +from keras import Model +from keras import layers +from keras import losses +from keras import metrics +from keras import optimizers class MyModel(Model): diff --git a/examples/demo_torch_multi_gpu.py b/examples/demo_torch_multi_gpu.py index a7084252b..72f3058a8 100644 --- a/examples/demo_torch_multi_gpu.py +++ b/examples/demo_torch_multi_gpu.py @@ -7,8 +7,8 @@ os.environ["KERAS_BACKEND"] = "torch" import torch import torch.nn as nn import torch.optim as optim -from keras_core import layers -import keras_core +from keras import layers +import keras import numpy as np import torch.multiprocessing as mp @@ -27,7 +27,7 @@ num_epochs = 1 def get_data(): # Load the data and split it between train and test sets - (x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data() + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 @@ -48,7 +48,7 @@ def get_data(): def get_model(): # Create the Keras model - model = keras_core.Sequential( + model = keras.Sequential( [ layers.Input(shape=(28, 28, 1)), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), @@ -66,7 +66,7 @@ def get_model(): class MyModel(nn.Module): def __init__(self): super().__init__() - self.model = keras_core.Sequential( + self.model = keras.Sequential( [ layers.Input(shape=(28, 28, 1)), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), diff --git a/examples/keras_io/generative/text_generation_gpt.py b/examples/keras_io/generative/text_generation_gpt.py index 7e24bea37..0b4bc3731 100644 --- 
a/examples/keras_io/generative/text_generation_gpt.py +++ b/examples/keras_io/generative/text_generation_gpt.py @@ -41,7 +41,7 @@ import os os.environ["KERAS_BACKEND"] = "jax" import keras_nlp -import keras_core as keras +import keras as keras import tensorflow.data as tf_data import tensorflow.strings as tf_strings diff --git a/examples/keras_io/generative/text_generation_with_miniature_gpt.py b/examples/keras_io/generative/text_generation_with_miniature_gpt.py index 6b0f6b5a6..ba51c74cc 100644 --- a/examples/keras_io/generative/text_generation_with_miniature_gpt.py +++ b/examples/keras_io/generative/text_generation_with_miniature_gpt.py @@ -42,10 +42,10 @@ with TensorFlow 2.3 or higher. import os os.environ['KERAS_BACKEND'] = 'tensorflow' -import keras_core as keras -from keras_core import layers -from keras_core import ops -from keras_core.layers import TextVectorization +import keras as keras +from keras import layers +from keras import ops +from keras.layers import TextVectorization import numpy as np import os import string diff --git a/examples/keras_io/nlp/addition_rnn.py b/examples/keras_io/nlp/addition_rnn.py index a8728a81e..5e9f1bba4 100644 --- a/examples/keras_io/nlp/addition_rnn.py +++ b/examples/keras_io/nlp/addition_rnn.py @@ -46,8 +46,8 @@ Five digits (reversed): ## Setup """ -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import numpy as np # Parameters for the model and dataset. 
diff --git a/examples/keras_io/nlp/bidirectional_lstm_imdb.py b/examples/keras_io/nlp/bidirectional_lstm_imdb.py index 0d2350d23..2e3bc3d4b 100644 --- a/examples/keras_io/nlp/bidirectional_lstm_imdb.py +++ b/examples/keras_io/nlp/bidirectional_lstm_imdb.py @@ -11,8 +11,8 @@ Accelerator: GPU """ import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers max_features = 20000 # Only consider the top 20k words maxlen = 200 # Only consider the first 200 words of each movie review diff --git a/examples/keras_io/nlp/multiple_choice_task_with_transfer_learning.py b/examples/keras_io/nlp/multiple_choice_task_with_transfer_learning.py index 088db109a..b455fa76a 100644 --- a/examples/keras_io/nlp/multiple_choice_task_with_transfer_learning.py +++ b/examples/keras_io/nlp/multiple_choice_task_with_transfer_learning.py @@ -21,7 +21,7 @@ unlike question answering. We will use SWAG dataset to demonstrate this example. """ """shell -pip install -q keras-core --upgrade +pip install -q keras --upgrade pip install -q keras-nlp --upgrade """ @@ -29,7 +29,7 @@ import os os.environ["KERAS_BACKEND"] = "jax" # or "tensorflow" or "torch" import keras_nlp -import keras_core as keras +import keras as keras import tensorflow as tf import numpy as np @@ -194,7 +194,7 @@ content of the options themselves, rather than being influenced by their positio **Note:** Even though `option_shuffle` function is written in pure tensorflow, it can be used with any backend (e.g. JAX, PyTorch) as it is only used -in `tf.data.Dataset` pipeline which is compatible with Keras Core routines. +in `tf.data.Dataset` pipeline which is compatible with Keras 3 routines. 
""" diff --git a/examples/keras_io/nlp/neural_machine_translation_with_keras_nlp.py b/examples/keras_io/nlp/neural_machine_translation_with_keras_nlp.py index d7e82b7ab..fdc9a7e99 100644 --- a/examples/keras_io/nlp/neural_machine_translation_with_keras_nlp.py +++ b/examples/keras_io/nlp/neural_machine_translation_with_keras_nlp.py @@ -52,8 +52,8 @@ import keras_nlp import pathlib import random -import keras_core as keras -from keras_core import ops +import keras as keras +from keras import ops import tensorflow.data as tf_data from tensorflow_text.tools.wordpiece_vocab import ( diff --git a/examples/keras_io/nlp/neural_machine_translation_with_transformer.py b/examples/keras_io/nlp/neural_machine_translation_with_transformer.py index d56991007..4a2db8c0b 100644 --- a/examples/keras_io/nlp/neural_machine_translation_with_transformer.py +++ b/examples/keras_io/nlp/neural_machine_translation_with_transformer.py @@ -53,10 +53,10 @@ import numpy as np import tensorflow.data as tf_data import tensorflow.strings as tf_strings -import keras_core as keras -from keras_core import layers -from keras_core import ops -from keras_core.layers import TextVectorization +import keras as keras +from keras import layers +from keras import ops +from keras.layers import TextVectorization """ ## Downloading the data @@ -231,7 +231,7 @@ sure that it only uses information from target tokens 0 to N when predicting tok (otherwise, it could use information from the future, which would result in a model that cannot be used at inference time). 
""" -import keras_core.ops as ops +import keras.ops as ops class TransformerEncoder(layers.Layer): def __init__(self, embed_dim, dense_dim, num_heads, **kwargs): diff --git a/examples/keras_io/nlp/pretraining_BERT.py b/examples/keras_io/nlp/pretraining_BERT.py index 8a192259d..39f4d106e 100644 --- a/examples/keras_io/nlp/pretraining_BERT.py +++ b/examples/keras_io/nlp/pretraining_BERT.py @@ -84,7 +84,7 @@ import nltk import random import logging -import keras_core as keras +import keras as keras nltk.download("punkt") # Set random seed diff --git a/examples/keras_io/pytorch/torchvision_keras.py b/examples/keras_io/pytorch/torchvision_keras.py index dfaefb11f..bbb8b4dbb 100644 --- a/examples/keras_io/pytorch/torchvision_keras.py +++ b/examples/keras_io/pytorch/torchvision_keras.py @@ -21,7 +21,7 @@ Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html). ### References: - [Customizing what happens in `fit()` with -PyTorch](https://keras.io/keras_core/guides/custom_train_step_in_torch/) +PyTorch](https://keras.io/keras/guides/custom_train_step_in_torch/) - [PyTorch Datasets and Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html) - [Transfer learning for Computer Vision using @@ -46,8 +46,8 @@ import torch.nn.functional as F import torchvision from torchvision import datasets, models, transforms -import keras_core as keras -from keras_core.layers import TorchModuleWrapper +import keras as keras +from keras.layers import TorchModuleWrapper """ ## Define the Hyperparameters @@ -165,9 +165,9 @@ resnet_18.fc = nn.Identity() """ Even though Keras supports PyTorch as a backend, it does not mean that we can nest torch -modules inside a [`keras_core.Model`](https://keras.io/keras_core/api/models/), because +modules inside a [`keras.Model`](https://keras.io/keras/api/models/), because trainable variables inside a Keras Model is tracked exclusively via [Keras -Layers](https://keras.io/keras_core/api/layers/). 
+Layers](https://keras.io/keras/api/layers/). KerasCore provides us with a feature called `TorchModuleWrapper` which enables us to do exactly this. The `TorchModuleWrapper` is a Keras Layer that accepts a torch module and diff --git a/examples/keras_io/structured_data/collaborative_filtering_movielens.py b/examples/keras_io/structured_data/collaborative_filtering_movielens.py index 41e89ebb7..de8973aa8 100644 --- a/examples/keras_io/structured_data/collaborative_filtering_movielens.py +++ b/examples/keras_io/structured_data/collaborative_filtering_movielens.py @@ -37,9 +37,9 @@ import matplotlib.pyplot as plt import numpy as np from zipfile import ZipFile -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops """ ## First, load the data and apply preprocessing diff --git a/examples/keras_io/structured_data/deep_neural_decision_forests.py b/examples/keras_io/structured_data/deep_neural_decision_forests.py index e9a6ea833..edc5bb97f 100644 --- a/examples/keras_io/structured_data/deep_neural_decision_forests.py +++ b/examples/keras_io/structured_data/deep_neural_decision_forests.py @@ -33,10 +33,10 @@ and 9 categorical features. ## Setup """ -import keras_core as keras -from keras_core import layers -from keras_core.layers import StringLookup -from keras_core import ops +import keras as keras +from keras import layers +from keras.layers import StringLookup +from keras import ops from tensorflow import data as tf_data diff --git a/examples/keras_io/structured_data/tabtransformer.py b/examples/keras_io/structured_data/tabtransformer.py index 202af9a5a..2b6b94d9a 100644 --- a/examples/keras_io/structured_data/tabtransformer.py +++ b/examples/keras_io/structured_data/tabtransformer.py @@ -21,9 +21,9 @@ into robust contextual embeddings to achieve higher predictive accuracy. 
## Setup """ -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import math import numpy as np diff --git a/examples/keras_io/tensorflow/audio/speaker_recognition_using_cnn.py b/examples/keras_io/tensorflow/audio/speaker_recognition_using_cnn.py index 11de078e3..d21b1aff2 100644 --- a/examples/keras_io/tensorflow/audio/speaker_recognition_using_cnn.py +++ b/examples/keras_io/tensorflow/audio/speaker_recognition_using_cnn.py @@ -1,7 +1,7 @@ """ Title: Speaker Recognition Author: [Fadi Badine](https://twitter.com/fadibadine) -Converted to Keras Core by: [Fadi Badine](https://twitter.com/fadibadine) +Converted to Keras 3 by: [Fadi Badine](https://twitter.com/fadibadine) Date created: 14/06/2020 Last modified: 19/07/2023 Description: Classify speakers using Fast Fourier Transform (FFT) and a 1D Convnet. @@ -47,7 +47,7 @@ import shutil import numpy as np import tensorflow as tf -import keras_core as keras +import keras as keras from pathlib import Path from IPython.display import display, Audio diff --git a/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py b/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py index 3f4ab6c32..5e7c1f804 100644 --- a/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py +++ b/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py @@ -1,7 +1,7 @@ """ Title: English speaker accent recognition using Transfer Learning Author: [Fadi Badine](https://twitter.com/fadibadine) -Converted to Keras Core by: [Fadi Badine](https://twitter.com/fadibadine) +Converted to Keras 3 by: [Fadi Badine](https://twitter.com/fadibadine) Date created: 2022/04/16 Last modified: 2023/07/19 Description: Training a model to classify UK & Ireland accents using feature extraction from Yamnet. 
@@ -108,7 +108,7 @@ import pandas as pd import tensorflow as tf import tensorflow_hub as hub import tensorflow_io as tfio -import keras_core as keras +import keras as keras import matplotlib.pyplot as plt import seaborn as sns from scipy import stats diff --git a/examples/keras_io/tensorflow/generative/cyclegan.py b/examples/keras_io/tensorflow/generative/cyclegan.py index cfb54bc92..cb6da9cb7 100644 --- a/examples/keras_io/tensorflow/generative/cyclegan.py +++ b/examples/keras_io/tensorflow/generative/cyclegan.py @@ -27,9 +27,9 @@ using cycle-consistent adversarial networks. import numpy as np import matplotlib.pyplot as plt -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import tensorflow as tf import tensorflow_datasets as tfds @@ -664,13 +664,13 @@ and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io # data and check the model's performance. """shell -curl -LO https://github.com/freedomtan/cyclegan-keras-core/archive/refs/tags/2.0.zip +curl -LO https://github.com/freedomtan/cyclegan-keras-core/archive/refs/tags/2.0.zip unzip -qq 2.0.zip """ # Load the checkpoints -weight_file = "./cyclegan-keras-core-2.0/model_checkpoints/cyclegan_checkpoints.090.weights.h5" +weight_file = "./cyclegan-keras-core-2.0/model_checkpoints/cyclegan_checkpoints.090.weights.h5" cycle_gan_model.load_weights(weight_file) print("Weights loaded successfully") diff --git a/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py b/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py index e644083bb..ed624604a 100644 --- a/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py +++ b/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py @@ -11,8 +11,8 @@ Accelerator: GPU """ import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import
layers import matplotlib.pyplot as plt import os import gdown diff --git a/examples/keras_io/tensorflow/generative/ddim.py b/examples/keras_io/tensorflow/generative/ddim.py index 7e4caa935..b7289c536 100644 --- a/examples/keras_io/tensorflow/generative/ddim.py +++ b/examples/keras_io/tensorflow/generative/ddim.py @@ -72,9 +72,9 @@ import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops """ ## Hyperparameters diff --git a/examples/keras_io/tensorflow/generative/ddpm.py b/examples/keras_io/tensorflow/generative/ddpm.py index 32a51f1a4..2d5a6700d 100644 --- a/examples/keras_io/tensorflow/generative/ddpm.py +++ b/examples/keras_io/tensorflow/generative/ddpm.py @@ -90,8 +90,8 @@ import matplotlib.pyplot as plt # Requires TensorFlow >=2.11 for the GroupNormalization layer. import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow_datasets as tfds """ diff --git a/examples/keras_io/tensorflow/generative/deep_dream.py b/examples/keras_io/tensorflow/generative/deep_dream.py index 55e563d2b..959a2ba6a 100644 --- a/examples/keras_io/tensorflow/generative/deep_dream.py +++ b/examples/keras_io/tensorflow/generative/deep_dream.py @@ -38,8 +38,8 @@ and compare the result to the (resized) original image. 
import numpy as np import tensorflow as tf -import keras_core as keras -from keras_core.applications import inception_v3 +import keras as keras +from keras.applications import inception_v3 base_image_path = keras.utils.get_file( "sky.jpg", "https://i.imgur.com/aGBdQyK.jpg" diff --git a/examples/keras_io/tensorflow/generative/lstm_character_level_text_generation.py b/examples/keras_io/tensorflow/generative/lstm_character_level_text_generation.py index d10cff5fe..f506143cc 100644 --- a/examples/keras_io/tensorflow/generative/lstm_character_level_text_generation.py +++ b/examples/keras_io/tensorflow/generative/lstm_character_level_text_generation.py @@ -25,8 +25,8 @@ has at least ~100k characters. ~1M is better. """ ## Setup """ -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import numpy as np import random diff --git a/examples/keras_io/tensorflow/generative/neural_style_transfer.py b/examples/keras_io/tensorflow/generative/neural_style_transfer.py index 2d6727b09..a9bb4cb0b 100644 --- a/examples/keras_io/tensorflow/generative/neural_style_transfer.py +++ b/examples/keras_io/tensorflow/generative/neural_style_transfer.py @@ -40,8 +40,8 @@ keeping the generated image close enough to the original one. 
import numpy as np import tensorflow as tf -import keras_core as keras -from keras_core.applications import vgg19 +import keras as keras +from keras.applications import vgg19 base_image_path = keras.utils.get_file( "paris.jpg", "https://i.imgur.com/F28w3Ac.jpg" diff --git a/examples/keras_io/tensorflow/generative/vae.py b/examples/keras_io/tensorflow/generative/vae.py index 5d974dd65..f05a4b9d8 100644 --- a/examples/keras_io/tensorflow/generative/vae.py +++ b/examples/keras_io/tensorflow/generative/vae.py @@ -13,8 +13,8 @@ Accelerator: GPU import numpy as np import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Create a sampling layer diff --git a/examples/keras_io/tensorflow/generative/wgan_gp.py b/examples/keras_io/tensorflow/generative/wgan_gp.py index bb8bf92ab..0592a3117 100644 --- a/examples/keras_io/tensorflow/generative/wgan_gp.py +++ b/examples/keras_io/tensorflow/generative/wgan_gp.py @@ -30,8 +30,8 @@ that keeps the L2 norm of the discriminator gradients close to 1. """ import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ diff --git a/examples/keras_io/tensorflow/keras_recipes/antirectifier.py b/examples/keras_io/tensorflow/keras_recipes/antirectifier.py index ac312882e..0b69bcdc8 100644 --- a/examples/keras_io/tensorflow/keras_recipes/antirectifier.py +++ b/examples/keras_io/tensorflow/keras_recipes/antirectifier.py @@ -23,8 +23,8 @@ features back to a space of the original size. 
""" import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## The Antirectifier layer diff --git a/examples/keras_io/tensorflow/keras_recipes/endpoint_layer_pattern.py b/examples/keras_io/tensorflow/keras_recipes/endpoint_layer_pattern.py index 24c004727..06c90c63a 100644 --- a/examples/keras_io/tensorflow/keras_recipes/endpoint_layer_pattern.py +++ b/examples/keras_io/tensorflow/keras_recipes/endpoint_layer_pattern.py @@ -12,7 +12,7 @@ Accelerator: GPU """ import tensorflow as tf -import keras_core as keras +import keras as keras import numpy as np """ diff --git a/examples/keras_io/tensorflow/keras_recipes/subclassing_conv_layers.py b/examples/keras_io/tensorflow/keras_recipes/subclassing_conv_layers.py index 317ff77a5..cee24e284 100644 --- a/examples/keras_io/tensorflow/keras_recipes/subclassing_conv_layers.py +++ b/examples/keras_io/tensorflow/keras_recipes/subclassing_conv_layers.py @@ -27,8 +27,8 @@ Using this approach, we can quickly implement a [StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below. """ import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import numpy as np diff --git a/examples/keras_io/tensorflow/keras_recipes/tensorflow_numpy_models.py b/examples/keras_io/tensorflow/keras_recipes/tensorflow_numpy_models.py index 30a70298b..714b43cfd 100644 --- a/examples/keras_io/tensorflow/keras_recipes/tensorflow_numpy_models.py +++ b/examples/keras_io/tensorflow/keras_recipes/tensorflow_numpy_models.py @@ -29,8 +29,8 @@ TensorFlow NumPy requires TensorFlow 2.5 or later. import tensorflow as tf import tensorflow.experimental.numpy as tnp -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ Optionally, you can call `tnp.experimental_enable_numpy_behavior()` to enable type promotion in TensorFlow. 
diff --git a/examples/keras_io/tensorflow/keras_recipes/tf_serving.py b/examples/keras_io/tensorflow/keras_recipes/tf_serving.py index 84c53fcbb..bd52e2eab 100644 --- a/examples/keras_io/tensorflow/keras_recipes/tf_serving.py +++ b/examples/keras_io/tensorflow/keras_recipes/tf_serving.py @@ -60,7 +60,7 @@ import shutil import requests import numpy as np import tensorflow as tf -import keras_core as keras +import keras as keras import matplotlib.pyplot as plt """ diff --git a/examples/keras_io/tensorflow/keras_recipes/trainer_pattern.py b/examples/keras_io/tensorflow/keras_recipes/trainer_pattern.py index d74bafee4..6106ad6ed 100644 --- a/examples/keras_io/tensorflow/keras_recipes/trainer_pattern.py +++ b/examples/keras_io/tensorflow/keras_recipes/trainer_pattern.py @@ -25,7 +25,7 @@ by putting the custom training step in the Trainer class definition. """ import tensorflow as tf -import keras_core as keras +import keras as keras # Load MNIST dataset and standardize the data mnist = keras.datasets.mnist diff --git a/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py b/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py index 61bf1cf4d..5d0cbf29e 100644 --- a/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py +++ b/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py @@ -50,8 +50,8 @@ from pathlib import Path from dataclasses import dataclass import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Configuration diff --git a/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py b/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py index 3dc3165a1..e81cff9d6 100644 --- a/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py +++ b/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py @@ -47,7 +47,7 @@ models are more common in this domain. 
""" import numpy as np -import keras_core as keras +import keras as keras import os from pathlib import Path diff --git a/examples/keras_io/tensorflow/nlp/ner_transformers.py b/examples/keras_io/tensorflow/nlp/ner_transformers.py index fb84dcebd..d75cd67d8 100644 --- a/examples/keras_io/tensorflow/nlp/ner_transformers.py +++ b/examples/keras_io/tensorflow/nlp/ner_transformers.py @@ -34,8 +34,8 @@ wget https://raw.githubusercontent.com/sighsmile/conlleval/master/conlleval.py import os import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers from datasets import load_dataset from collections import Counter from conlleval import evaluate diff --git a/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py b/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py index 082367fe4..92a98e271 100644 --- a/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py +++ b/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py @@ -13,7 +13,7 @@ Accelerator: GPU import numpy as np import tensorflow.data as tf_data -import keras_core as keras +import keras as keras """ ## Introduction @@ -123,7 +123,7 @@ Our layer will only consider the top 20,000 words, and will truncate or pad sequ be actually 200 tokens long. """ -from keras_core.layers import TextVectorization +from keras.layers import TextVectorization vectorizer = TextVectorization(max_tokens=20000, output_sequence_length=200) text_ds = tf_data.Dataset.from_tensor_slices(train_samples).batch(128) @@ -228,7 +228,7 @@ Note that we set `trainable=False` so as to keep the embeddings fixed (we don't update them during training). """ -from keras_core.layers import Embedding +from keras.layers import Embedding embedding_layer = Embedding( num_tokens, @@ -244,7 +244,7 @@ embedding_layer.set_weights([embedding_matrix]) A simple 1D convnet with global max pooling and a classifier at the end. 
""" -from keras_core import layers +from keras import layers int_sequences_input = keras.Input(shape=(None,), dtype="int64") embedded_sequences = embedding_layer(int_sequences_input) diff --git a/examples/keras_io/tensorflow/nlp/text_classification_from_scratch.py b/examples/keras_io/tensorflow/nlp/text_classification_from_scratch.py index 9c1a33e35..676a9a7e3 100644 --- a/examples/keras_io/tensorflow/nlp/text_classification_from_scratch.py +++ b/examples/keras_io/tensorflow/nlp/text_classification_from_scratch.py @@ -20,9 +20,9 @@ classification dataset (unprocessed version). We use the `TextVectorization` lay """ import tensorflow as tf -import keras_core as keras -from keras_core.layers import TextVectorization -from keras_core import layers +import keras as keras +from keras.layers import TextVectorization +from keras import layers import string import re import os diff --git a/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py b/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py index 1a4850bbd..e19042a0a 100644 --- a/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py +++ b/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py @@ -1,7 +1,7 @@ """ Title: Actor Critic Method Author: [Apoorv Nandan](https://twitter.com/NandanApoorv) -Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com) Date created: 2020/05/13 Last modified: 2023/07/19 Description: Implement Actor Critic Method in CartPole environment. 
@@ -44,8 +44,8 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import gym import numpy as np diff --git a/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py b/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py index af4c1dddf..67da22bc2 100644 --- a/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py +++ b/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py @@ -20,7 +20,7 @@ to train a classification model on data with highly imbalanced classes. """ import numpy as np -import keras_core as keras +import keras as keras # Get the real data from https://www.kaggle.com/mlg-ulb/creditcardfraud/ fname = "/Users/fchollet/Downloads/creditcard.csv" diff --git a/examples/keras_io/tensorflow/structured_data/structured_data_classification_from_scratch.py b/examples/keras_io/tensorflow/structured_data/structured_data_classification_from_scratch.py index 37ee4938e..4036213c3 100644 --- a/examples/keras_io/tensorflow/structured_data/structured_data_classification_from_scratch.py +++ b/examples/keras_io/tensorflow/structured_data/structured_data_classification_from_scratch.py @@ -51,8 +51,8 @@ Target | Diagnosis of heart disease (1 = true; 0 = false) | Target import tensorflow as tf import pandas as pd -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Preparing the data @@ -172,9 +172,9 @@ then one-hot encode these integer indices. - `encode_integer_categorical_feature` to one-hot encode integer categorical features. 
""" -from keras_core.layers import IntegerLookup -from keras_core.layers import Normalization -from keras_core.layers import StringLookup +from keras.layers import IntegerLookup +from keras.layers import Normalization +from keras.layers import StringLookup def encode_numerical_feature(feature, name, dataset): diff --git a/examples/keras_io/tensorflow/structured_data/structured_data_classification_with_feature_space.py b/examples/keras_io/tensorflow/structured_data/structured_data_classification_with_feature_space.py index c0c0eab6b..301bcb9ae 100644 --- a/examples/keras_io/tensorflow/structured_data/structured_data_classification_with_feature_space.py +++ b/examples/keras_io/tensorflow/structured_data/structured_data_classification_with_feature_space.py @@ -61,8 +61,8 @@ Target | Diagnosis of heart disease (1 = true; 0 = false) | Target import tensorflow as tf import pandas as pd -import keras_core as keras -from keras_core.utils import FeatureSpace +import keras as keras +from keras.utils import FeatureSpace keras.config.disable_traceback_filtering() diff --git a/examples/keras_io/tensorflow/timeseries/eeg_signal_classification.py b/examples/keras_io/tensorflow/timeseries/eeg_signal_classification.py index 3fb51087e..4a4ad9589 100644 --- a/examples/keras_io/tensorflow/timeseries/eeg_signal_classification.py +++ b/examples/keras_io/tensorflow/timeseries/eeg_signal_classification.py @@ -65,8 +65,8 @@ import pandas as pd import matplotlib.pyplot as plt import json import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow as tf from sklearn import preprocessing, model_selection import random diff --git a/examples/keras_io/tensorflow/timeseries/timeseries_traffic_forecasting.py b/examples/keras_io/tensorflow/timeseries/timeseries_traffic_forecasting.py index ac4539c6c..56f66e600 100644 --- a/examples/keras_io/tensorflow/timeseries/timeseries_traffic_forecasting.py +++ 
b/examples/keras_io/tensorflow/timeseries/timeseries_traffic_forecasting.py @@ -47,9 +47,9 @@ import typing import matplotlib.pyplot as plt import tensorflow as tf -import keras_core as keras -from keras_core import layers -from keras_core.utils import timeseries_dataset_from_array +import keras as keras +from keras import layers +from keras.utils import timeseries_dataset_from_array """ ## Data preparation diff --git a/examples/keras_io/tensorflow/timeseries/timeseries_weather_forecasting.py b/examples/keras_io/tensorflow/timeseries/timeseries_weather_forecasting.py index b484fba76..b2dd13955 100644 --- a/examples/keras_io/tensorflow/timeseries/timeseries_weather_forecasting.py +++ b/examples/keras_io/tensorflow/timeseries/timeseries_weather_forecasting.py @@ -14,7 +14,7 @@ This example requires TensorFlow 2.3 or higher. import pandas as pd import matplotlib.pyplot as plt -import keras_core as keras +import keras as keras """ ## Climate Data Time-Series diff --git a/examples/keras_io/tensorflow/vision/bit.py b/examples/keras_io/tensorflow/vision/bit.py index b809a107f..64dd72e91 100644 --- a/examples/keras_io/tensorflow/vision/bit.py +++ b/examples/keras_io/tensorflow/vision/bit.py @@ -42,7 +42,7 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" -import keras_core as keras +import keras as keras import numpy as np import matplotlib.pyplot as plt diff --git a/examples/keras_io/tensorflow/vision/cutmix.py b/examples/keras_io/tensorflow/vision/cutmix.py index 8fae84593..115c0dd0d 100644 --- a/examples/keras_io/tensorflow/vision/cutmix.py +++ b/examples/keras_io/tensorflow/vision/cutmix.py @@ -1,7 +1,7 @@ """ Title: CutMix data augmentation for image classification Author: [Sayan Nath](https://twitter.com/sayannath2350) -Converted to Keras Core By: [Piyush Thakur](https://github.com/cosmo3769) +Converted to Keras 3 By: [Piyush Thakur](https://github.com/cosmo3769) Date created: 2021/06/08 Last modified: 2023/07/24 Description: Data augmentation with CutMix for 
image classification on CIFAR-10. @@ -48,10 +48,10 @@ where `rx, ry` are randomly drawn from a uniform distribution with upper bound. import numpy as np import pandas as pd -import keras_core as keras +import keras as keras import matplotlib.pyplot as plt -from keras_core import layers +from keras import layers # TF imports related to tf.data preprocessing from tensorflow import clip_by_value diff --git a/examples/keras_io/tensorflow/vision/deit.py b/examples/keras_io/tensorflow/vision/deit.py index 09b988c6e..003b06821 100644 --- a/examples/keras_io/tensorflow/vision/deit.py +++ b/examples/keras_io/tensorflow/vision/deit.py @@ -49,8 +49,8 @@ os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import tensorflow_datasets as tfds -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers tfds.disable_progress_bar() keras.utils.set_random_seed(42) diff --git a/examples/keras_io/tensorflow/vision/grad_cam.py b/examples/keras_io/tensorflow/vision/grad_cam.py index e69cc8e54..8cd07f4d9 100644 --- a/examples/keras_io/tensorflow/vision/grad_cam.py +++ b/examples/keras_io/tensorflow/vision/grad_cam.py @@ -13,7 +13,7 @@ Adapted from Deep Learning with Python (2017). 
import numpy as np import tensorflow as tf -import keras_core as keras +import keras as keras # Display from IPython.display import Image, display diff --git a/examples/keras_io/tensorflow/vision/image_captioning.py b/examples/keras_io/tensorflow/vision/image_captioning.py index ad5136cb8..449481f60 100644 --- a/examples/keras_io/tensorflow/vision/image_captioning.py +++ b/examples/keras_io/tensorflow/vision/image_captioning.py @@ -17,10 +17,10 @@ import numpy as np import matplotlib.pyplot as plt import tensorflow as tf -import keras_core as keras -from keras_core import layers -from keras_core.applications import efficientnet -from keras_core.layers import TextVectorization +import keras as keras +from keras import layers +from keras.applications import efficientnet +from keras.layers import TextVectorization seed = 111 diff --git a/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py b/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py index b9f39a0e6..205b77740 100644 --- a/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py +++ b/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py @@ -23,8 +23,8 @@ we use Keras image preprocessing layers for image standardization and data augme """ import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import os from pathlib import Path import matplotlib.pyplot as plt diff --git a/examples/keras_io/tensorflow/vision/integrated_gradients.py b/examples/keras_io/tensorflow/vision/integrated_gradients.py index d620ad769..6006dcf84 100644 --- a/examples/keras_io/tensorflow/vision/integrated_gradients.py +++ b/examples/keras_io/tensorflow/vision/integrated_gradients.py @@ -58,9 +58,9 @@ from scipy import ndimage from IPython.display import Image, display import tensorflow as tf -import keras_core as keras -from keras_core import layers -from keras_core.applications import 
xception +import keras as keras +from keras import layers +from keras.applications import xception keras.config.disable_traceback_filtering() diff --git a/examples/keras_io/tensorflow/vision/involution.py b/examples/keras_io/tensorflow/vision/involution.py index a44b6c7ae..733a39a40 100644 --- a/examples/keras_io/tensorflow/vision/involution.py +++ b/examples/keras_io/tensorflow/vision/involution.py @@ -36,7 +36,7 @@ layer. """ import tensorflow as tf -import keras_core as keras +import keras as keras import matplotlib.pyplot as plt # Set seed for reproducibility. diff --git a/examples/keras_io/tensorflow/vision/metric_learning.py b/examples/keras_io/tensorflow/vision/metric_learning.py index c8d9e4344..95ae8cee3 100644 --- a/examples/keras_io/tensorflow/vision/metric_learning.py +++ b/examples/keras_io/tensorflow/vision/metric_learning.py @@ -32,9 +32,9 @@ import tensorflow as tf from collections import defaultdict from PIL import Image from sklearn.metrics import ConfusionMatrixDisplay -import keras_core as keras -from keras_core import layers -from keras_core.datasets import cifar10 +import keras as keras +from keras import layers +from keras.datasets import cifar10 """ ## Dataset diff --git a/examples/keras_io/tensorflow/vision/mirnet.py b/examples/keras_io/tensorflow/vision/mirnet.py index b37f201ea..78d972801 100644 --- a/examples/keras_io/tensorflow/vision/mirnet.py +++ b/examples/keras_io/tensorflow/vision/mirnet.py @@ -1,7 +1,7 @@ """ Title: Low-light image enhancement using MIRNet Author: [Soumik Rakshit](http://github.com/soumik12345) -Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345) +Converted to Keras 3 by: [Soumik Rakshit](http://github.com/soumik12345) Date created: 2021/09/11 Last modified: 2023/07/15 Description: Implementing the MIRNet architecture for low-light image enhancement. 
@@ -34,7 +34,7 @@ consists of a low-light input image and its corresponding well-exposed reference """ """shell -pip install -q git+https://github.com/keras-team/keras-core +pip install -q git+https://github.com/keras-team/keras """ import os @@ -47,8 +47,8 @@ from glob import glob from PIL import Image, ImageOps import matplotlib.pyplot as plt -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow as tf diff --git a/examples/keras_io/tensorflow/vision/mixup.py b/examples/keras_io/tensorflow/vision/mixup.py index e58654dec..0d40dea75 100644 --- a/examples/keras_io/tensorflow/vision/mixup.py +++ b/examples/keras_io/tensorflow/vision/mixup.py @@ -37,10 +37,10 @@ processing, speech, and so on. """ import numpy as np -import keras_core as keras +import keras as keras import matplotlib.pyplot as plt -from keras_core import layers +from keras import layers # TF imports related to tf.data preprocessing from tensorflow import data as tf_data diff --git a/examples/keras_io/tensorflow/vision/perceiver_image_classification.py b/examples/keras_io/tensorflow/vision/perceiver_image_classification.py index cb37aa5f5..ad58406fc 100644 --- a/examples/keras_io/tensorflow/vision/perceiver_image_classification.py +++ b/examples/keras_io/tensorflow/vision/perceiver_image_classification.py @@ -43,8 +43,8 @@ pip install -U tensorflow-addons import numpy as np import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Prepare the data diff --git a/examples/keras_io/tensorflow/vision/reptile.py b/examples/keras_io/tensorflow/vision/reptile.py index 91c6373dc..ef79f97a7 100644 --- a/examples/keras_io/tensorflow/vision/reptile.py +++ b/examples/keras_io/tensorflow/vision/reptile.py @@ -1,7 +1,7 @@ """ Title: Few-Shot learning with Reptile Author: [ADMoreau](https://github.com/ADMoreau) -Converted to Keras Core By: [Muhammad Anas 
Raza](https://anasrz.com) +Converted to Keras 3 By: [Muhammad Anas Raza](https://anasrz.com) Date created: 2020/05/21 Last modified: 2023/07/20 Description: Few-shot classification on the Omniglot dataset using Reptile. @@ -22,8 +22,8 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import matplotlib.pyplot as plt import numpy as np diff --git a/examples/keras_io/tensorflow/vision/semisupervised_simclr.py b/examples/keras_io/tensorflow/vision/semisupervised_simclr.py index f62465385..70388ca9d 100644 --- a/examples/keras_io/tensorflow/vision/semisupervised_simclr.py +++ b/examples/keras_io/tensorflow/vision/semisupervised_simclr.py @@ -77,8 +77,8 @@ import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Hyperparameter setup diff --git a/examples/keras_io/tensorflow/vision/shiftvit.py b/examples/keras_io/tensorflow/vision/shiftvit.py index 3c1dcd707..2f0f67017 100644 --- a/examples/keras_io/tensorflow/vision/shiftvit.py +++ b/examples/keras_io/tensorflow/vision/shiftvit.py @@ -1,7 +1,7 @@ """ Title: A Vision Transformer without Attention Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ritwik Raha](https://twitter.com/ritwik_raha) -Converted to Keras Core: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3: [Muhammad Anas Raza](https://anasrz.com) Date created: 2022/02/24 Last modified: 2023/07/15 Description: A minimal implementation of ShiftViT. 
@@ -40,8 +40,8 @@ import numpy as np import matplotlib.pyplot as plt import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers # Setting seed for reproducibiltiy diff --git a/examples/keras_io/tensorflow/vision/siamese_network.py b/examples/keras_io/tensorflow/vision/siamese_network.py index 98b7b912d..04ff9053b 100644 --- a/examples/keras_io/tensorflow/vision/siamese_network.py +++ b/examples/keras_io/tensorflow/vision/siamese_network.py @@ -38,11 +38,11 @@ import numpy as np import os import tensorflow as tf from pathlib import Path -from keras_core import layers -from keras_core import optimizers -from keras_core import metrics -from keras_core import Model -from keras_core.applications import resnet +from keras import layers +from keras import optimizers +from keras import metrics +from keras import Model +from keras.applications import resnet target_shape = (200, 200) diff --git a/examples/keras_io/tensorflow/vision/simsiam.py b/examples/keras_io/tensorflow/vision/simsiam.py index b9671407e..e7e630cce 100644 --- a/examples/keras_io/tensorflow/vision/simsiam.py +++ b/examples/keras_io/tensorflow/vision/simsiam.py @@ -49,9 +49,9 @@ versions of our dataset. import os os.environ['KERAS_BACKEND'] = 'tensorflow' -from keras_core import layers -from keras_core import regularizers -import keras_core as keras +from keras import layers +from keras import regularizers +import keras as keras import tensorflow as tf import matplotlib.pyplot as plt diff --git a/examples/keras_io/tensorflow/vision/swim_transformers.py b/examples/keras_io/tensorflow/vision/swim_transformers.py index 02ba25a36..529ca5507 100644 --- a/examples/keras_io/tensorflow/vision/swim_transformers.py +++ b/examples/keras_io/tensorflow/vision/swim_transformers.py @@ -29,8 +29,8 @@ This example requires TensorFlow 2.5 or higher. 
import matplotlib.pyplot as plt import numpy as np import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Prepare the data diff --git a/examples/keras_io/tensorflow/vision/visualizing_what_convnets_learn.py b/examples/keras_io/tensorflow/vision/visualizing_what_convnets_learn.py index 3b1233c16..3ed975c6d 100644 --- a/examples/keras_io/tensorflow/vision/visualizing_what_convnets_learn.py +++ b/examples/keras_io/tensorflow/vision/visualizing_what_convnets_learn.py @@ -26,7 +26,7 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" -import keras_core as keras +import keras as keras import numpy as np diff --git a/examples/keras_io/tensorflow/vision/zero_dce.py b/examples/keras_io/tensorflow/vision/zero_dce.py index b17473bee..c2b903f0e 100644 --- a/examples/keras_io/tensorflow/vision/zero_dce.py +++ b/examples/keras_io/tensorflow/vision/zero_dce.py @@ -1,7 +1,7 @@ """ Title: Zero-DCE for low-light image enhancement Author: [Soumik Rakshit](http://github.com/soumik12345) -Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345) +Converted to Keras 3 by: [Soumik Rakshit](http://github.com/soumik12345) Date created: 2021/09/18 Last modified: 2023/07/15 Description: Implementing Zero-Reference Deep Curve Estimation for low-light image enhancement. @@ -49,8 +49,8 @@ from glob import glob from PIL import Image, ImageOps import matplotlib.pyplot as plt -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow as tf diff --git a/examples/keras_io/timeseries/timeseries_anomaly_detection.py b/examples/keras_io/timeseries/timeseries_anomaly_detection.py index 9a59fcd02..1175b3039 100644 --- a/examples/keras_io/timeseries/timeseries_anomaly_detection.py +++ b/examples/keras_io/timeseries/timeseries_anomaly_detection.py @@ -20,8 +20,8 @@ autoencoder model to detect anomalies in timeseries data. 
import numpy as np import pandas as pd -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers from matplotlib import pyplot as plt """ diff --git a/examples/keras_io/timeseries/timeseries_classification_from_scratch.py b/examples/keras_io/timeseries/timeseries_classification_from_scratch.py index 8cd668db3..71850e0a1 100755 --- a/examples/keras_io/timeseries/timeseries_classification_from_scratch.py +++ b/examples/keras_io/timeseries/timeseries_classification_from_scratch.py @@ -19,7 +19,7 @@ CSV timeseries files on disk. We demonstrate the workflow on the FordA dataset f ## Setup """ -import keras_core as keras +import keras as keras import numpy as np import matplotlib.pyplot as plt diff --git a/examples/keras_io/timeseries/timeseries_classification_transformer.py b/examples/keras_io/timeseries/timeseries_classification_transformer.py index f9826447d..8195b30b6 100644 --- a/examples/keras_io/timeseries/timeseries_classification_transformer.py +++ b/examples/keras_io/timeseries/timeseries_classification_transformer.py @@ -62,8 +62,8 @@ You can replace your classification RNN layers with this one: the inputs are fully compatible! """ -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ We include residual connections, layer normalization, and dropout. diff --git a/examples/keras_io/vision/attention_mil_classification.py b/examples/keras_io/vision/attention_mil_classification.py index 50198bad8..8092f01ec 100644 --- a/examples/keras_io/vision/attention_mil_classification.py +++ b/examples/keras_io/vision/attention_mil_classification.py @@ -54,9 +54,9 @@ by TensorFlow. 
""" import numpy as np -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops from tqdm import tqdm from matplotlib import pyplot as plt diff --git a/examples/keras_io/vision/autoencoder.py b/examples/keras_io/vision/autoencoder.py index 91d2c5c3f..95005b231 100644 --- a/examples/keras_io/vision/autoencoder.py +++ b/examples/keras_io/vision/autoencoder.py @@ -24,9 +24,9 @@ by [François Chollet](https://twitter.com/fchollet). import numpy as np import matplotlib.pyplot as plt -from keras_core import layers -from keras_core.datasets import mnist -from keras_core.models import Model +from keras import layers +from keras.datasets import mnist +from keras.models import Model def preprocess(array): diff --git a/examples/keras_io/vision/cct.py b/examples/keras_io/vision/cct.py index 59a32200b..29d602748 100644 --- a/examples/keras_io/vision/cct.py +++ b/examples/keras_io/vision/cct.py @@ -1,7 +1,7 @@ """ Title: Compact Convolutional Transformers Author: [Sayak Paul](https://twitter.com/RisingSayak) -Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com), [Guillaume Baquiast](https://www.linkedin.com/in/guillaume-baquiast-478965ba/) +Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com), [Guillaume Baquiast](https://www.linkedin.com/in/guillaume-baquiast-478965ba/) Date created: 2021/06/30 Last modified: 2023/08/07 Description: Compact Convolutional Transformers for efficient image classification. 
@@ -39,8 +39,8 @@ code snippets from another example, ## Imports """ -from keras_core import layers -import keras_core as keras +from keras import layers +import keras as keras import matplotlib.pyplot as plt import numpy as np diff --git a/examples/keras_io/vision/conv_lstm.py b/examples/keras_io/vision/conv_lstm.py index e6bb601f3..b538a8244 100644 --- a/examples/keras_io/vision/conv_lstm.py +++ b/examples/keras_io/vision/conv_lstm.py @@ -25,8 +25,8 @@ of predicting what video frames come next given a series of past frames. import numpy as np import matplotlib.pyplot as plt -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import io import imageio diff --git a/examples/keras_io/vision/deeplabv3_plus.py b/examples/keras_io/vision/deeplabv3_plus.py index c3c7ce4ff..8e26f94be 100644 --- a/examples/keras_io/vision/deeplabv3_plus.py +++ b/examples/keras_io/vision/deeplabv3_plus.py @@ -1,7 +1,7 @@ """ Title: Multiclass semantic segmentation using DeepLabV3+ Author: [Soumik Rakshit](http://github.com/soumik12345) -Converted to Keras Core: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3: [Muhammad Anas Raza](https://anasrz.com) Date created: 2021/08/31 Last modified: 2023/07/19 Description: Implement DeepLabV3+ architecture for Multi-class Semantic Segmentation. @@ -32,9 +32,9 @@ This dataset can be used for the "human part segmentation" task. 
""" -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import cv2 import numpy as np diff --git a/examples/keras_io/vision/eanet.py b/examples/keras_io/vision/eanet.py index ebee1ee61..2b56ecd52 100644 --- a/examples/keras_io/vision/eanet.py +++ b/examples/keras_io/vision/eanet.py @@ -1,7 +1,7 @@ """ Title: Image classification with EANet (External Attention Transformer) Author: [ZhiYong Chang](https://github.com/czy00000) -Converted to Keras Core: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3: [Muhammad Anas Raza](https://anasrz.com) Date created: 2021/10/19 Last modified: 2023/07/18 Description: Image classification with a Transformer that leverages external attention. @@ -25,9 +25,9 @@ implicitly considers the correlations between all samples. ## Setup """ -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import matplotlib.pyplot as plt diff --git a/examples/keras_io/vision/fixres.py b/examples/keras_io/vision/fixres.py index 0bb2632ba..b6eb59e37 100644 --- a/examples/keras_io/vision/fixres.py +++ b/examples/keras_io/vision/fixres.py @@ -27,8 +27,8 @@ to fix this discrepancy. 
## Imports """ -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow as tf # just for image processing and pipeline import tensorflow_datasets as tfds diff --git a/examples/keras_io/vision/gradient_centralization.py b/examples/keras_io/vision/gradient_centralization.py index a70dff191..708888f53 100644 --- a/examples/keras_io/vision/gradient_centralization.py +++ b/examples/keras_io/vision/gradient_centralization.py @@ -1,7 +1,7 @@ """ Title: Gradient Centralization for Better Training Performance Author: [Rishit Dagli](https://github.com/Rishit-dagli) -Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com) Date created: 06/18/21 Last modified: 07/25/23 Description: Implement Gradient Centralization to improve training performance of DNNs. @@ -34,10 +34,10 @@ pip install tensorflow-datasets from time import time -import keras_core as keras -from keras_core import layers -from keras_core.optimizers import RMSprop -from keras_core import ops +import keras as keras +from keras import layers +from keras.optimizers import RMSprop +from keras import ops from tensorflow import data as tf_data import tensorflow_datasets as tfds diff --git a/examples/keras_io/vision/image_classification_with_vision_transformer.py b/examples/keras_io/vision/image_classification_with_vision_transformer.py index 307abe967..dc4154061 100644 --- a/examples/keras_io/vision/image_classification_with_vision_transformer.py +++ b/examples/keras_io/vision/image_classification_with_vision_transformer.py @@ -1,7 +1,7 @@ """ Title: Image classification with Vision Transformer Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) -Converted to Keras Core by: [divyasreepat](https://github.com/divyashreepathihalli), [Soumik Rakshit](http://github.com/soumik12345) +Converted to Keras 3 by: 
[divyasreepat](https://github.com/divyashreepathihalli), [Soumik Rakshit](http://github.com/soumik12345) Date created: 2021/01/18 Last modified: 2021/01/18 Description: Implementing the Vision Transformer (ViT) model for image classification. @@ -27,9 +27,9 @@ import os os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"] -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import numpy as np import matplotlib.pyplot as plt diff --git a/examples/keras_io/vision/image_classifier.py b/examples/keras_io/vision/image_classifier.py index d681446dc..5bd72493f 100644 --- a/examples/keras_io/vision/image_classifier.py +++ b/examples/keras_io/vision/image_classifier.py @@ -25,7 +25,7 @@ classification problems at three levels of complexity: ## Multi-Backend Support KerasCV's `ImageClassifier` model supports several backends like JAX, PyTorch, -and TensorFlow with the help of `keras_core`. To enable multi-backend support +and TensorFlow with the help of `keras`. To enable multi-backend support in KerasCV, set the `KERAS_CV_MULTI_BACKEND` environment variable. We can then switch between different backends by setting the `KERAS_BACKEND` environment variable. 
Currently, `"tensorflow"`, `"jax"`, and `"torch"` are @@ -42,12 +42,12 @@ os.environ["KERAS_BACKEND"] = "jax" import json import math import keras_cv -import keras_core as keras -from keras_core import ops -from keras_core import losses -from keras_core import optimizers -from keras_core.optimizers import schedules -from keras_core import metrics +import keras as keras +from keras import ops +from keras import losses +from keras import optimizers +from keras.optimizers import schedules +from keras import metrics import tensorflow as tf from tensorflow import data as tf_data import tensorflow_datasets as tfds diff --git a/examples/keras_io/vision/keypoint_detection.py b/examples/keras_io/vision/keypoint_detection.py index 4b50220b4..d4e1d260e 100644 --- a/examples/keras_io/vision/keypoint_detection.py +++ b/examples/keras_io/vision/keypoint_detection.py @@ -1,7 +1,7 @@ """ Title: Keypoint Detection with Transfer Learning Author: [Sayak Paul](https://twitter.com/RisingSayak) -Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com) Date created: 2021/05/02 Last modified: 2023/07/19 Description: Training a keypoint detector with data augmentation and transfer learning. @@ -57,8 +57,8 @@ unzip -qq ~/stanfordextra_v12.zip """ ## Imports """ -from keras_core import layers -import keras_core as keras +from keras import layers +import keras as keras from imgaug.augmentables.kps import KeypointsOnImage from imgaug.augmentables.kps import Keypoint diff --git a/examples/keras_io/vision/learnable_resizer.py b/examples/keras_io/vision/learnable_resizer.py index b33fb067c..b4fa9db26 100644 --- a/examples/keras_io/vision/learnable_resizer.py +++ b/examples/keras_io/vision/learnable_resizer.py @@ -39,9 +39,9 @@ using the [DenseNet-121](https://arxiv.org/abs/1608.06993) architecture. 
## Setup """ -from keras_core import layers -import keras_core as keras -from keras_core import ops +from keras import layers +import keras as keras +from keras import ops from tensorflow import data as tf_data from tensorflow import image as tf_image diff --git a/examples/keras_io/vision/mlp_image_classification.py b/examples/keras_io/vision/mlp_image_classification.py index efe457cdf..c245ab714 100644 --- a/examples/keras_io/vision/mlp_image_classification.py +++ b/examples/keras_io/vision/mlp_image_classification.py @@ -1,7 +1,7 @@ """ Title: Image classification with modern MLP models Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) -Converted to Keras Core by: [Guillaume Baquiast](https://www.linkedin.com/in/guillaume-baquiast-478965ba/), [divyasreepat](https://github.com/divyashreepathihalli) +Converted to Keras 3 by: [Guillaume Baquiast](https://www.linkedin.com/in/guillaume-baquiast-478965ba/), [divyasreepat](https://github.com/divyashreepathihalli) Date created: 2021/05/30 Last modified: 2023/08/03 Description: Implementing the MLP-Mixer, FNet, and gMLP models for CIFAR-100 image classification. @@ -29,8 +29,8 @@ main building blocks. 
""" import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Prepare the data diff --git a/examples/keras_io/vision/mnist_convnet.py b/examples/keras_io/vision/mnist_convnet.py index 3c722cece..4e3e05e4c 100644 --- a/examples/keras_io/vision/mnist_convnet.py +++ b/examples/keras_io/vision/mnist_convnet.py @@ -12,8 +12,8 @@ Accelerator: GPU """ import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers """ ## Prepare the data diff --git a/examples/keras_io/vision/object_detection_using_vision_transformer.py b/examples/keras_io/vision/object_detection_using_vision_transformer.py index 090723845..fa11bbfaf 100644 --- a/examples/keras_io/vision/object_detection_using_vision_transformer.py +++ b/examples/keras_io/vision/object_detection_using_vision_transformer.py @@ -1,7 +1,7 @@ """ Title: Object detection with Vision Transformers Author: [Karan V. Dave](https://www.linkedin.com/in/karan-dave-811413164/) -Converted to Keras Core by: [Gabriel Rasskin](https://github.com/grasskin), [Soumik Rakshit](http://github.com/soumik12345) +Converted to Keras 3 by: [Gabriel Rasskin](https://github.com/grasskin), [Soumik Rakshit](http://github.com/soumik12345) Date created: 2022/03/27 Last modified: 2022/03/27 Description: A simple Keras implementation of object detection using Vision Transformers. @@ -29,7 +29,7 @@ from which we import the `AdamW` optimizer. 
TensorFlow Addons can be installed via the following command: ``` -pip install -U git+https://github.com/keras-team/keras-core +pip install -U git+https://github.com/keras-team/keras ``` """ @@ -43,9 +43,9 @@ os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"] import numpy as np -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops import matplotlib.pyplot as plt import numpy as np import cv2 diff --git a/examples/keras_io/vision/oxford_pets_image_segmentation.py b/examples/keras_io/vision/oxford_pets_image_segmentation.py index 58387da82..e1507f76e 100644 --- a/examples/keras_io/vision/oxford_pets_image_segmentation.py +++ b/examples/keras_io/vision/oxford_pets_image_segmentation.py @@ -58,7 +58,7 @@ for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]): """ from IPython.display import Image, display -from keras_core.utils import load_img +from keras.utils import load_img from PIL import ImageOps # Display input image #7 @@ -72,7 +72,7 @@ display(img) ## Prepare dataset to load & vectorize batches of data """ -import keras_core as keras +import keras as keras import numpy as np from tensorflow import data as tf_data from tensorflow import image as tf_image @@ -118,7 +118,7 @@ def get_dataset( ## Prepare U-Net Xception-style model """ -from keras_core import layers +from keras import layers def get_model(img_size, num_classes): diff --git a/examples/keras_io/vision/siamese_contrastive.py b/examples/keras_io/vision/siamese_contrastive.py index 3bc1ea13f..12c87b8ed 100644 --- a/examples/keras_io/vision/siamese_contrastive.py +++ b/examples/keras_io/vision/siamese_contrastive.py @@ -26,8 +26,8 @@ the class segmentation of the training inputs. 
import random import numpy as np -import keras_core as keras -from keras_core import ops +import keras as keras +from keras import ops import matplotlib.pyplot as plt """ diff --git a/examples/keras_io/vision/token_learner.py b/examples/keras_io/vision/token_learner.py index 96a8f0c5f..f6d9c370a 100644 --- a/examples/keras_io/vision/token_learner.py +++ b/examples/keras_io/vision/token_learner.py @@ -1,7 +1,7 @@ """ Title: Learning to tokenize in Vision Transformers Authors: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Sayak Paul](https://twitter.com/RisingSayak) (equal contribution) -Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com) +Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com) Date created: 2021/12/10 Last modified: 2023/08/14 Description: Adaptively generating a smaller number of tokens for Vision Transformers. @@ -49,9 +49,9 @@ references: ## Imports """ -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops from tensorflow import data as tf_data diff --git a/examples/keras_io/vision/video_transformers.py b/examples/keras_io/vision/video_transformers.py index 2bbea8e68..4969511ab 100644 --- a/examples/keras_io/vision/video_transformers.py +++ b/examples/keras_io/vision/video_transformers.py @@ -1,7 +1,7 @@ """ Title: Video Classification with Transformers Author: [Sayak Paul](https://twitter.com/RisingSayak) -Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345) +Converted to Keras 3 by: [Soumik Rakshit](http://github.com/soumik12345) Date created: 2021/06/08 Last modified: 2023/22/07 Description: Training a video classifier with hybrid transformers. @@ -19,7 +19,7 @@ classification that operate on CNN feature maps. 
""" """shell -pip install -q git+https://github.com/keras-team/keras-core +pip install -q git+https://github.com/keras-team/keras pip install -q git+https://github.com/tensorflow/docs """ @@ -47,9 +47,9 @@ import os os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"] -import keras_core as keras -from keras_core import layers -from keras_core.applications.densenet import DenseNet121 +import keras as keras +from keras import layers +from keras.applications.densenet import DenseNet121 from tensorflow_docs.vis import embed diff --git a/guides/custom_train_step_in_jax.py b/guides/custom_train_step_in_jax.py index 3b36a3912..a8cd1f8bf 100644 --- a/guides/custom_train_step_in_jax.py +++ b/guides/custom_train_step_in_jax.py @@ -48,7 +48,7 @@ import os os.environ["KERAS_BACKEND"] = "jax" import jax -import keras_core as keras +import keras as keras import numpy as np """ diff --git a/guides/custom_train_step_in_tensorflow.py b/guides/custom_train_step_in_tensorflow.py index 225dfdc6d..9a57c020c 100644 --- a/guides/custom_train_step_in_tensorflow.py +++ b/guides/custom_train_step_in_tensorflow.py @@ -48,8 +48,8 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import numpy as np """ diff --git a/guides/custom_train_step_in_torch.py b/guides/custom_train_step_in_torch.py index 25859019f..3cfd8b404 100644 --- a/guides/custom_train_step_in_torch.py +++ b/guides/custom_train_step_in_torch.py @@ -48,8 +48,8 @@ import os os.environ["KERAS_BACKEND"] = "torch" import torch -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import numpy as np """ diff --git a/guides/distributed_training_with_jax.py b/guides/distributed_training_with_jax.py index 8d55d2acd..3b8cddb34 100644 --- a/guides/distributed_training_with_jax.py +++ b/guides/distributed_training_with_jax.py @@ 
-46,7 +46,7 @@ os.environ["KERAS_BACKEND"] = "jax" import jax import numpy as np import tensorflow as tf -import keras_core as keras +import keras as keras from jax.experimental import mesh_utils from jax.sharding import Mesh diff --git a/guides/distributed_training_with_tensorflow.py b/guides/distributed_training_with_tensorflow.py index 4f84188e1..2b79d6e89 100644 --- a/guides/distributed_training_with_tensorflow.py +++ b/guides/distributed_training_with_tensorflow.py @@ -42,7 +42,7 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf -import keras_core as keras +import keras as keras """ ## Single-host, multi-device synchronous training diff --git a/guides/distributed_training_with_torch.py b/guides/distributed_training_with_torch.py index 30ff06d3a..35d56793c 100644 --- a/guides/distributed_training_with_torch.py +++ b/guides/distributed_training_with_torch.py @@ -46,7 +46,7 @@ os.environ["KERAS_BACKEND"] = "torch" import torch import numpy as np -import keras_core as keras +import keras as keras def get_model(): diff --git a/guides/functional_api.py b/guides/functional_api.py index 083ecf69c..a5273b4d6 100644 --- a/guides/functional_api.py +++ b/guides/functional_api.py @@ -11,9 +11,9 @@ Accelerator: GPU """ import numpy as np -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops """ ## Introduction diff --git a/guides/making_new_layers_and_models_via_subclassing.py b/guides/making_new_layers_and_models_via_subclassing.py index c8c7892bb..fcaea43f5 100644 --- a/guides/making_new_layers_and_models_via_subclassing.py +++ b/guides/making_new_layers_and_models_via_subclassing.py @@ -29,9 +29,9 @@ Let's dive in. 
""" import numpy as np -import keras_core as keras -from keras_core import ops -from keras_core import layers +import keras as keras +from keras import ops +from keras import layers """ ## The `Layer` class: the combination of state (weights) and some computation diff --git a/guides/sequential_model.py b/guides/sequential_model.py index a778fa53c..b95c46e81 100644 --- a/guides/sequential_model.py +++ b/guides/sequential_model.py @@ -11,9 +11,9 @@ Accelerator: GPU """ -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops """ ## When to use a Sequential model diff --git a/guides/training_with_built_in_methods.py b/guides/training_with_built_in_methods.py index 42f9e1536..fda4f18a4 100644 --- a/guides/training_with_built_in_methods.py +++ b/guides/training_with_built_in_methods.py @@ -17,9 +17,9 @@ import tensorflow as tf import os import numpy as np -import keras_core as keras -from keras_core import layers -from keras_core import ops +import keras as keras +from keras import layers +from keras import ops """ ## Introduction @@ -51,7 +51,7 @@ This guide doesn't cover distributed training, which is covered in our When passing data to the built-in training loops of a model, you should either use: - NumPy arrays (if your data is small and fits in memory) -- Subclasses of `keras_core.utils.PyDataset` +- Subclasses of `keras.utils.PyDataset` - `tf.data.Dataset` objects - PyTorch `DataLoader` instances diff --git a/guides/transfer_learning.py b/guides/transfer_learning.py index dcdb7d463..760041c59 100644 --- a/guides/transfer_learning.py +++ b/guides/transfer_learning.py @@ -11,8 +11,8 @@ Accelerator: GPU """ import numpy as np -import keras_core as keras -from keras_core import layers +import keras as keras +from keras import layers import tensorflow_datasets as tfds import matplotlib.pyplot as plt diff --git a/guides/understanding_masking_and_padding.py 
b/guides/understanding_masking_and_padding.py index 30cfa3ad0..0250ac98f 100644 --- a/guides/understanding_masking_and_padding.py +++ b/guides/understanding_masking_and_padding.py @@ -10,9 +10,9 @@ Accelerator: None ## Setup """ import numpy as np -import keras_core as keras -from keras_core import ops -from keras_core import layers +import keras as keras +from keras import ops +from keras import layers """ ## Introduction diff --git a/guides/writing_a_custom_training_loop_in_jax.py b/guides/writing_a_custom_training_loop_in_jax.py index d19e6ddf1..1da718a40 100644 --- a/guides/writing_a_custom_training_loop_in_jax.py +++ b/guides/writing_a_custom_training_loop_in_jax.py @@ -19,7 +19,7 @@ import jax # We import TF so we can use tf.data. import tensorflow as tf -import keras_core as keras +import keras as keras import numpy as np """ @@ -45,7 +45,7 @@ your own training & evaluation loops from scratch. This is what this guide is ab To write a custom training loop, we need the following ingredients: - A model to train, of course. -- An optimizer. You could either use an optimizer from `keras_core.optimizers`, or +- An optimizer. You could either use an optimizer from `keras.optimizers`, or one from the `optax` package. - A loss function. - A dataset. 
The standard in the JAX ecosystem is to load data via `tf.data`, diff --git a/guides/writing_a_custom_training_loop_in_tensorflow.py b/guides/writing_a_custom_training_loop_in_tensorflow.py index 540d48924..523c9427e 100644 --- a/guides/writing_a_custom_training_loop_in_tensorflow.py +++ b/guides/writing_a_custom_training_loop_in_tensorflow.py @@ -17,7 +17,7 @@ import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf -import keras_core as keras +import keras as keras import numpy as np """ diff --git a/guides/writing_a_custom_training_loop_in_torch.py b/guides/writing_a_custom_training_loop_in_torch.py index 470cd5e28..52f6070c1 100644 --- a/guides/writing_a_custom_training_loop_in_torch.py +++ b/guides/writing_a_custom_training_loop_in_torch.py @@ -16,7 +16,7 @@ import os os.environ["KERAS_BACKEND"] = "torch" import torch -import keras_core as keras +import keras as keras import numpy as np """ @@ -42,9 +42,9 @@ your own training & evaluation loops from scratch. This is what this guide is ab To write a custom training loop, we need the following ingredients: - A model to train, of course. -- An optimizer. You could either use a `keras_core.optimizers` optimizer, +- An optimizer. You could either use a `keras.optimizers` optimizer, or a native PyTorch optimizer from `torch.optim`. -- A loss function. You could either use a `keras_core.losses` loss, +- A loss function. You could either use a `keras.losses` loss, or a native PyTorch loss from `torch.nn`. - A dataset. You could use any format: a `tf.data.Dataset`, a PyTorch `DataLoader`, a Python generator, etc. diff --git a/guides/writing_your_own_callbacks.py b/guides/writing_your_own_callbacks.py index 10eeba23b..dbd0ccfc2 100644 --- a/guides/writing_your_own_callbacks.py +++ b/guides/writing_your_own_callbacks.py @@ -24,7 +24,7 @@ started. 
""" import numpy as np -import keras_core as keras +import keras as keras """ ## Keras callbacks overview diff --git a/integration_tests/distribute_training_test.py b/integration_tests/distribute_training_test.py index cdc9422bb..775d52b49 100644 --- a/integration_tests/distribute_training_test.py +++ b/integration_tests/distribute_training_test.py @@ -1,12 +1,12 @@ import numpy as np import tensorflow as tf -from keras_core import layers -from keras_core import losses -from keras_core import metrics -from keras_core import models -from keras_core import optimizers -from keras_core.utils import rng_utils +from keras import layers +from keras import losses +from keras import metrics +from keras import models +from keras import optimizers +from keras.utils import rng_utils def test_model_fit(): diff --git a/integration_tests/layer_in_torch_workflow.py b/integration_tests/layer_in_torch_workflow.py index 0f56753c0..4d9abab9c 100644 --- a/integration_tests/layer_in_torch_workflow.py +++ b/integration_tests/layer_in_torch_workflow.py @@ -1,7 +1,7 @@ import torch -from keras_core import layers -from keras_core.backend.common import KerasVariable +from keras import layers +from keras.backend.common import KerasVariable class Net(torch.nn.Module): diff --git a/integration_tests/model_visualization_test.py b/integration_tests/model_visualization_test.py index 66999d478..31e866b25 100644 --- a/integration_tests/model_visualization_test.py +++ b/integration_tests/model_visualization_test.py @@ -1,13 +1,13 @@ -import keras_core -from keras_core.utils import plot_model +import keras +from keras.utils import plot_model def plot_sequential_model(): - model = keras_core.Sequential( + model = keras.Sequential( [ - keras_core.Input((3,)), - keras_core.layers.Dense(4, activation="relu"), - keras_core.layers.Dense(1, activation="sigmoid"), + keras.Input((3,)), + keras.layers.Dense(4, activation="relu"), + keras.layers.Dense(1, activation="sigmoid"), ] ) plot_model(model, 
"sequential.png") @@ -61,22 +61,22 @@ def plot_sequential_model(): def plot_functional_model(): - inputs = keras_core.Input((3,)) - x = keras_core.layers.Dense(4, activation="relu", trainable=False)(inputs) + inputs = keras.Input((3,)) + x = keras.layers.Dense(4, activation="relu", trainable=False)(inputs) residual = x - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) x += residual residual = x - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) x += residual - x = keras_core.layers.Dropout(0.5)(x) - outputs = keras_core.layers.Dense(1, activation="sigmoid")(x) + x = keras.layers.Dropout(0.5)(x) + outputs = keras.layers.Dense(1, activation="sigmoid")(x) - model = keras_core.Model(inputs, outputs) + model = keras.Model(inputs, outputs) plot_model(model, "functional.png") plot_model(model, "functional-show_shapes.png", show_shapes=True) plot_model( @@ -135,11 +135,11 @@ def plot_functional_model(): def plot_subclassed_model(): - class MyModel(keras_core.Model): + class MyModel(keras.Model): def __init__(self, **kwargs): super().__init__(**kwargs) - self.dense_1 = keras_core.layers.Dense(3, activation="relu") - self.dense_2 = keras_core.layers.Dense(1, activation="sigmoid") + self.dense_1 = keras.layers.Dense(3, activation="relu") + self.dense_2 = keras.layers.Dense(1, activation="sigmoid") def call(self, x): return self.dense_2(self.dense_1(x)) @@ -205,25 +205,25 @@ def plot_subclassed_model(): def plot_nested_functional_model(): - inputs = 
keras_core.Input((3,)) - x = keras_core.layers.Dense(4, activation="relu")(inputs) - x = keras_core.layers.Dense(4, activation="relu")(x) - outputs = keras_core.layers.Dense(3, activation="relu")(x) - inner_model = keras_core.Model(inputs, outputs) + inputs = keras.Input((3,)) + x = keras.layers.Dense(4, activation="relu")(inputs) + x = keras.layers.Dense(4, activation="relu")(x) + outputs = keras.layers.Dense(3, activation="relu")(x) + inner_model = keras.Model(inputs, outputs) - inputs = keras_core.Input((3,)) - x = keras_core.layers.Dense(3, activation="relu", trainable=False)(inputs) + inputs = keras.Input((3,)) + x = keras.layers.Dense(3, activation="relu", trainable=False)(inputs) residual = x x = inner_model(x) x += residual residual = x - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(4, activation="relu")(x) - x = keras_core.layers.Dense(3, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(4, activation="relu")(x) + x = keras.layers.Dense(3, activation="relu")(x) x += residual - x = keras_core.layers.Dropout(0.5)(x) - outputs = keras_core.layers.Dense(1, activation="sigmoid")(x) - model = keras_core.Model(inputs, outputs) + x = keras.layers.Dropout(0.5)(x) + outputs = keras.layers.Dense(1, activation="sigmoid")(x) + model = keras.Model(inputs, outputs) plot_model(model, "nested-functional.png", expand_nested=True) plot_model( diff --git a/integration_tests/numerical_test.py b/integration_tests/numerical_test.py index 6556573c9..fd798b1e8 100644 --- a/integration_tests/numerical_test.py +++ b/integration_tests/numerical_test.py @@ -1,4 +1,4 @@ -import keras_core # isort: skip, keep it on top for torch test +import keras as keras_3 # isort: skip, keep it on top for torch test import numpy as np from tensorflow import keras @@ -78,27 +78,27 @@ def eval_model(model, x, y): def numerical_test(): x_train, y_train, x_test, y_test = build_mnist_data(NUM_CLASSES) keras_model = build_keras_model(keras, NUM_CLASSES) - keras_core_model = build_keras_model(keras_core, NUM_CLASSES) + keras_3_model = build_keras_model(keras_3, NUM_CLASSES) # Make sure both model have same weights before training weights = [weight.numpy() for weight in keras_model.weights] - keras_core_model.set_weights(weights) + keras_3_model.set_weights(weights) - for kw, kcw in zip(keras_model.weights, keras_core_model.weights): + for kw, kcw in zip(keras_model.weights, keras_3_model.weights): np.testing.assert_allclose(kw.numpy(), kcw.numpy()) keras_history = train_model(keras_model, x_train, y_train) - keras_core_history = train_model(keras_core_model, x_train, y_train) + keras_3_history = train_model(keras_3_model, x_train, y_train) for key in keras_history.history.keys(): np.testing.assert_allclose( keras_history.history[key], - keras_core_history.history[key], + keras_3_history.history[key], atol=1e-3, ) if __name__ == "__main__": keras.utils.set_random_seed(1337) - keras_core.utils.set_random_seed(1337) + keras_3.utils.set_random_seed(1337) numerical_test() diff --git a/integration_tests/torch_backend_keras_workflow.py b/integration_tests/torch_backend_keras_workflow.py index 11782dc17..93dc68ca4 100644 --- a/integration_tests/torch_backend_keras_workflow.py +++ b/integration_tests/torch_backend_keras_workflow.py @@ -1,15 +1,15 @@ import numpy as np -import keras_core -from keras_core import layers -from keras_core import ops +import keras +from keras import layers +from keras import ops -keras_core.utils.set_random_seed(1337) +keras.utils.set_random_seed(1337) x = np.random.rand(100, 32, 32, 3) y = np.random.randint(0, 2, size=(100, 1)) # Test sequential model. -model = keras_core.Sequential( +model = keras.Sequential( [ layers.Conv2D(filters=10, kernel_size=3), layers.GlobalAveragePooling2D(), @@ -35,11 +35,11 @@ model.test_on_batch(x, y) model.predict_on_batch(x) # Test functional model.
-inputs = keras_core.Input(shape=(32, 32, 3)) +inputs = keras.Input(shape=(32, 32, 3)) outputs = layers.Conv2D(filters=10, kernel_size=3)(inputs) outputs = layers.GlobalAveragePooling2D()(outputs) outputs = layers.Dense(1, activation="sigmoid")(outputs) -model = keras_core.Model(inputs, outputs) +model = keras.Model(inputs, outputs) model.compile( loss="binary_crossentropy", optimizer="adam", metrics=["mae", "accuracy"] ) @@ -74,12 +74,12 @@ class Linear(layers.Layer): return ops.matmul(inputs, self.w) + self.b -inputs = keras_core.Input(shape=(32, 32, 3)) +inputs = keras.Input(shape=(32, 32, 3)) outputs = layers.Conv2D(filters=10, kernel_size=3)(inputs) outputs = layers.GlobalAveragePooling2D()(outputs) outputs = Linear(1)(outputs) outputs = layers.Activation("sigmoid")(outputs) -model = keras_core.Model(inputs, outputs) +model = keras.Model(inputs, outputs) model.compile( loss="binary_crossentropy", optimizer="adam", metrics=["mae", "accuracy"] ) diff --git a/keras/__init__.py b/keras/__init__.py new file mode 100644 index 000000000..4cda3efc0 --- /dev/null +++ b/keras/__init__.py @@ -0,0 +1,19 @@ +from keras import activations +from keras import applications +from keras import backend +from keras import constraints +from keras import datasets +from keras import initializers +from keras import layers +from keras import models +from keras import ops +from keras import optimizers +from keras import regularizers +from keras import utils +from keras.backend import KerasTensor +from keras.layers import Input +from keras.layers import Layer +from keras.models import Functional +from keras.models import Model +from keras.models import Sequential +from keras.version import __version__ diff --git a/keras_core/activations/__init__.py b/keras/activations/__init__.py similarity index 61% rename from keras_core/activations/__init__.py rename to keras/activations/__init__.py index 903afc594..a23dead04 100644 --- a/keras_core/activations/__init__.py +++ 
b/keras/activations/__init__.py @@ -1,25 +1,25 @@ import types -from keras_core.activations.activations import elu -from keras_core.activations.activations import exponential -from keras_core.activations.activations import gelu -from keras_core.activations.activations import hard_sigmoid -from keras_core.activations.activations import leaky_relu -from keras_core.activations.activations import linear -from keras_core.activations.activations import log_softmax -from keras_core.activations.activations import mish -from keras_core.activations.activations import relu -from keras_core.activations.activations import relu6 -from keras_core.activations.activations import selu -from keras_core.activations.activations import sigmoid -from keras_core.activations.activations import silu -from keras_core.activations.activations import softmax -from keras_core.activations.activations import softplus -from keras_core.activations.activations import softsign -from keras_core.activations.activations import tanh -from keras_core.api_export import keras_core_export -from keras_core.saving import object_registration -from keras_core.saving import serialization_lib +from keras.activations.activations import elu +from keras.activations.activations import exponential +from keras.activations.activations import gelu +from keras.activations.activations import hard_sigmoid +from keras.activations.activations import leaky_relu +from keras.activations.activations import linear +from keras.activations.activations import log_softmax +from keras.activations.activations import mish +from keras.activations.activations import relu +from keras.activations.activations import relu6 +from keras.activations.activations import selu +from keras.activations.activations import sigmoid +from keras.activations.activations import silu +from keras.activations.activations import softmax +from keras.activations.activations import softplus +from keras.activations.activations import softsign +from 
keras.activations.activations import tanh +from keras.api_export import keras_export +from keras.saving import object_registration +from keras.saving import serialization_lib ALL_OBJECTS = { relu, @@ -46,7 +46,7 @@ ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS} ALL_OBJECTS_DICT["swish"] = silu -@keras_core_export("keras_core.activations.serialize") +@keras_export("keras.activations.serialize") def serialize(activation): fn_config = serialization_lib.serialize_keras_object(activation) if "config" not in fn_config: @@ -55,7 +55,7 @@ def serialize(activation): "serialized due to invalid function name. Make sure to use " "an activation name that matches the references defined in " "activations.py or use " - "`@keras_core.saving.register_keras_serializable()`" + "`@keras.saving.register_keras_serializable()`" "to register any custom activations. " f"config={fn_config}" ) @@ -75,7 +75,7 @@ def serialize(activation): return fn_config["config"] -@keras_core_export("keras_core.activations.deserialize") +@keras_export("keras.activations.deserialize") def deserialize(config, custom_objects=None): """Return a Keras activation function via its config.""" return serialization_lib.deserialize_keras_object( @@ -85,7 +85,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.activations.get") +@keras_export("keras.activations.get") def get(identifier): """Retrieve a Keras activation function via an identifier.""" if identifier is None: diff --git a/keras_core/activations/activations.py b/keras/activations/activations.py similarity index 87% rename from keras_core/activations/activations.py rename to keras/activations/activations.py index 7ee45ede7..41661fcc2 100644 --- a/keras_core/activations/activations.py +++ b/keras/activations/activations.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export +from keras import backend +from keras import ops +from 
keras.api_export import keras_export -@keras_core_export("keras_core.activations.relu") +@keras_export("keras.activations.relu") def relu(x, negative_slope=0.0, max_value=None, threshold=0.0): """Applies the rectified linear unit activation function. @@ -17,13 +17,13 @@ def relu(x, negative_slope=0.0, max_value=None, threshold=0.0): Examples: >>> x = [-10, -5, 0.0, 5, 10] - >>> keras_core.activations.relu(x) + >>> keras.activations.relu(x) [ 0., 0., 0., 5., 10.] - >>> keras_core.activations.relu(x, negative_slope=0.5) + >>> keras.activations.relu(x, negative_slope=0.5) [-5. , -2.5, 0. , 5. , 10. ] - >>> keras_core.activations.relu(x, max_value=5.) + >>> keras.activations.relu(x, max_value=5.) [0., 0., 0., 5., 5.] - >>> keras_core.activations.relu(x, threshold=5.) + >>> keras.activations.relu(x, threshold=5.) [-0., -0., 0., 0., 10.] Args: @@ -108,7 +108,7 @@ class ReLU(ops.Operation): return x -@keras_core_export("keras_core.activations.leaky_relu") +@keras_export("keras.activations.leaky_relu") def leaky_relu(x, negative_slope=0.2): """Leaky relu activation function. @@ -120,7 +120,7 @@ def leaky_relu(x, negative_slope=0.2): return ops.leaky_relu(x, negative_slope=negative_slope) -@keras_core_export("keras_core.activations.relu6") +@keras_export("keras.activations.relu6") def relu6(x): """Relu6 activation function. @@ -132,7 +132,7 @@ def relu6(x): return ops.relu6(x) -@keras_core_export("keras_core.activations.softmax") +@keras_export("keras.activations.softmax") def softmax(x, axis=-1): """Softmax converts a vector of values to a probability distribution. @@ -165,7 +165,7 @@ def softmax(x, axis=-1): return output -@keras_core_export("keras_core.activations.elu") +@keras_export("keras.activations.elu") def elu(x, alpha=1.0): """Exponential Linear Unit. 
@@ -193,7 +193,7 @@ def elu(x, alpha=1.0): return ops.elu(x, alpha=alpha) -@keras_core_export("keras_core.activations.selu") +@keras_export("keras.activations.selu") def selu(x): """Scaled Exponential Linear Unit (SELU). @@ -206,13 +206,13 @@ def selu(x): (`alpha=1.67326324` and `scale=1.05070098`). Basically, the SELU activation function multiplies `scale` (> 1) with the - output of the `keras_core.activations.elu` function to ensure a slope larger + output of the `keras.activations.elu` function to ensure a slope larger than one for positive inputs. The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized - correctly (see `keras_core.initializers.LecunNormal` initializer) + correctly (see `keras.initializers.LecunNormal` initializer) and the number of input units is "large enough" (see reference paper for more information). @@ -222,9 +222,9 @@ def selu(x): Notes: - To be used together with the - `keras_core.initializers.LecunNormal` initializer. + `keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant - `keras_core.layers.AlphaDropout` (rather than regular dropout). + `keras.layers.AlphaDropout` (rather than regular dropout). Reference: @@ -233,7 +233,7 @@ def selu(x): return ops.selu(x) -@keras_core_export("keras_core.activations.softplus") +@keras_export("keras.activations.softplus") def softplus(x): """Softplus activation function. @@ -245,7 +245,7 @@ def softplus(x): return ops.softplus(x) -@keras_core_export("keras_core.activations.softsign") +@keras_export("keras.activations.softsign") def softsign(x): """Softsign activation function. @@ -257,8 +257,8 @@ def softsign(x): return ops.softsign(x) -@keras_core_export( - ["keras_core.activations.silu", "keras_core.activations.swish"] +@keras_export( + ["keras.activations.silu", "keras.activations.swish"] ) def silu(x): """Swish (or Silu) activation function. 
@@ -279,7 +279,7 @@ def silu(x): return ops.silu(x) -@keras_core_export("keras_core.activations.gelu") +@keras_export("keras.activations.gelu") def gelu(x, approximate=False): """Gaussian error linear unit (GELU) activation function. @@ -302,7 +302,7 @@ def gelu(x, approximate=False): return ops.gelu(x, approximate=approximate) -@keras_core_export("keras_core.activations.tanh") +@keras_export("keras.activations.tanh") def tanh(x): """Hyperbolic tangent activation function. @@ -316,7 +316,7 @@ def tanh(x): return ops.tanh(x) -@keras_core_export("keras_core.activations.sigmoid") +@keras_export("keras.activations.sigmoid") def sigmoid(x): """Sigmoid activation function. @@ -343,7 +343,7 @@ def sigmoid(x): return output -@keras_core_export("keras_core.activations.exponential") +@keras_export("keras.activations.exponential") def exponential(x): """Exponential activation function. @@ -353,7 +353,7 @@ def exponential(x): return ops.exp(x) -@keras_core_export("keras_core.activations.hard_sigmoid") +@keras_export("keras.activations.hard_sigmoid") def hard_sigmoid(x): """Hard sigmoid activation function. @@ -376,7 +376,7 @@ def hard_sigmoid(x): return ops.hard_sigmoid(x) -@keras_core_export("keras_core.activations.linear") +@keras_export("keras.activations.linear") def linear(x): """Linear activation function (pass-through). @@ -401,7 +401,7 @@ class Mish(ops.Operation): return x * backend.nn.tanh(backend.nn.softplus(x)) -@keras_core_export("keras_core.activations.mish") +@keras_export("keras.activations.mish") def mish(x): """Mish activation function. @@ -424,7 +424,7 @@ def mish(x): return Mish.static_call(x) -@keras_core_export("keras_core.activations.log_softmax") +@keras_export("keras.activations.log_softmax") def log_softmax(x, axis=-1): """Log-Softmax activation function. 
diff --git a/keras_core/activations/activations_test.py b/keras/activations/activations_test.py similarity index 99% rename from keras_core/activations/activations_test.py rename to keras/activations/activations_test.py index ec7dbd826..6220cd295 100644 --- a/keras_core/activations/activations_test.py +++ b/keras/activations/activations_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras_core import activations -from keras_core import backend -from keras_core import testing +from keras import activations +from keras import backend +from keras import testing def _ref_softmax(values): diff --git a/keras_core/api_export.py b/keras/api_export.py similarity index 88% rename from keras_core/api_export.py rename to keras/api_export.py index a1623f997..0e8a554b9 100644 --- a/keras_core/api_export.py +++ b/keras/api_export.py @@ -30,9 +30,9 @@ def get_name_from_symbol(symbol): if namex: - class keras_core_export(namex.export): + class keras_export(namex.export): def __init__(self, path): - super().__init__(package="keras_core", path=path) + super().__init__(package="keras", path=path) def __call__(self, symbol): register_internal_serializable(self.path, symbol) @@ -40,7 +40,7 @@ if namex: else: - class keras_core_export: + class keras_export: def __init__(self, path): self.path = path diff --git a/keras_core/applications/__init__.py b/keras/applications/__init__.py similarity index 100% rename from keras_core/applications/__init__.py rename to keras/applications/__init__.py diff --git a/keras_core/applications/applications_test.py b/keras/applications/applications_test.py similarity index 88% rename from keras_core/applications/applications_test.py rename to keras/applications/applications_test.py index 24f23fd30..4fe60055e 100644 --- a/keras_core/applications/applications_test.py +++ b/keras/applications/applications_test.py @@ -4,26 +4,26 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import 
testing -from keras_core.applications import convnext -from keras_core.applications import densenet -from keras_core.applications import efficientnet -from keras_core.applications import efficientnet_v2 -from keras_core.applications import inception_resnet_v2 -from keras_core.applications import inception_v3 -from keras_core.applications import mobilenet -from keras_core.applications import mobilenet_v2 -from keras_core.applications import mobilenet_v3 -from keras_core.applications import nasnet -from keras_core.applications import resnet -from keras_core.applications import resnet_v2 -from keras_core.applications import vgg16 -from keras_core.applications import vgg19 -from keras_core.applications import xception -from keras_core.saving import serialization_lib -from keras_core.utils import file_utils -from keras_core.utils import image_utils +from keras import backend +from keras import testing +from keras.applications import convnext +from keras.applications import densenet +from keras.applications import efficientnet +from keras.applications import efficientnet_v2 +from keras.applications import inception_resnet_v2 +from keras.applications import inception_v3 +from keras.applications import mobilenet +from keras.applications import mobilenet_v2 +from keras.applications import mobilenet_v3 +from keras.applications import nasnet +from keras.applications import resnet +from keras.applications import resnet_v2 +from keras.applications import vgg16 +from keras.applications import vgg19 +from keras.applications import xception +from keras.saving import serialization_lib +from keras.utils import file_utils +from keras.utils import image_utils try: import PIL diff --git a/keras_core/applications/convnext.py b/keras/applications/convnext.py similarity index 94% rename from keras_core/applications/convnext.py rename to keras/applications/convnext.py index f8084f74b..e52ab4b93 100644 --- a/keras_core/applications/convnext.py +++ b/keras/applications/convnext.py @@ -1,17 
+1,17 @@ import numpy as np -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core import ops -from keras_core import random -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.layers.layer import Layer -from keras_core.models import Functional -from keras_core.models import Sequential -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import initializers +from keras import layers +from keras import ops +from keras import random +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.layers.layer import Layer +from keras.models import Functional +from keras.models import Sequential +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/convnext/" @@ -539,10 +539,10 @@ def ConvNeXt( ## Instantiating variants ## -@keras_core_export( +@keras_export( [ - "keras_core.applications.convnext.ConvNeXtTiny", - "keras_core.applications.ConvNeXtTiny", + "keras.applications.convnext.ConvNeXtTiny", + "keras.applications.ConvNeXtTiny", ] ) def ConvNeXtTiny( @@ -574,10 +574,10 @@ def ConvNeXtTiny( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.convnext.ConvNeXtSmall", - "keras_core.applications.ConvNeXtSmall", + "keras.applications.convnext.ConvNeXtSmall", + "keras.applications.ConvNeXtSmall", ] ) def ConvNeXtSmall( @@ -609,10 +609,10 @@ def ConvNeXtSmall( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.convnext.ConvNeXtBase", - "keras_core.applications.ConvNeXtBase", + "keras.applications.convnext.ConvNeXtBase", + "keras.applications.ConvNeXtBase", ] ) def ConvNeXtBase( @@ -644,10 +644,10 @@ def ConvNeXtBase( ) -@keras_core_export( +@keras_export( [ - 
"keras_core.applications.convnext.ConvNeXtLarge", - "keras_core.applications.ConvNeXtLarge", + "keras.applications.convnext.ConvNeXtLarge", + "keras.applications.ConvNeXtLarge", ] ) def ConvNeXtLarge( @@ -679,10 +679,10 @@ def ConvNeXtLarge( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.convnext.ConvNeXtXLarge", - "keras_core.applications.ConvNeXtXLarge", + "keras.applications.convnext.ConvNeXtXLarge", + "keras.applications.ConvNeXtXLarge", ] ) def ConvNeXtXLarge( @@ -721,7 +721,7 @@ ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge") ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge") -@keras_core_export("keras_core.applications.convnext.preprocess_input") +@keras_export("keras.applications.convnext.preprocess_input") def preprocess_input(x, data_format=None): """A placeholder method for backward compatibility. @@ -734,7 +734,7 @@ def preprocess_input(x, data_format=None): x: A floating point `numpy.array` or a tensor. data_format: Optional data format of the image tensor/array. 
Defaults to None, in which case the global setting - `keras_core.backend.image_data_format()` is used + `keras.backend.image_data_format()` is used (unless you changed it, it defaults to `"channels_last"`).{mode} Returns: @@ -743,7 +743,7 @@ def preprocess_input(x, data_format=None): return x -@keras_core_export("keras_core.applications.convnext.decode_predictions") +@keras_export("keras.applications.convnext.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/densenet.py b/keras/applications/densenet.py similarity index 94% rename from keras_core/applications/densenet.py rename to keras/applications/densenet.py index 5ac493127..614a444cb 100644 --- a/keras_core/applications/densenet.py +++ b/keras/applications/densenet.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/densenet/" @@ -132,7 +132,7 @@ def DenseNet( https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. - For DenseNet, call `keras_core.applications.densenet.preprocess_input` + For DenseNet, call `keras.applications.densenet.preprocess_input` on your inputs before passing them to the model. 
`densenet.preprocess_input` will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet @@ -317,10 +317,10 @@ def DenseNet( return model -@keras_core_export( +@keras_export( [ - "keras_core.applications.densenet.DenseNet121", - "keras_core.applications.DenseNet121", + "keras.applications.densenet.DenseNet121", + "keras.applications.DenseNet121", ] ) def DenseNet121( @@ -345,10 +345,10 @@ def DenseNet121( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.densenet.DenseNet169", - "keras_core.applications.DenseNet169", + "keras.applications.densenet.DenseNet169", + "keras.applications.DenseNet169", ] ) def DenseNet169( @@ -373,10 +373,10 @@ def DenseNet169( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.densenet.DenseNet201", - "keras_core.applications.DenseNet201", + "keras.applications.densenet.DenseNet201", + "keras.applications.DenseNet201", ] ) def DenseNet201( @@ -401,14 +401,14 @@ def DenseNet201( ) -@keras_core_export("keras_core.applications.densenet.preprocess_input") +@keras_export("keras.applications.densenet.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="torch" ) -@keras_core_export("keras_core.applications.densenet.decode_predictions") +@keras_export("keras.applications.densenet.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) @@ -431,7 +431,7 @@ Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Note: each Keras Application expects a specific kind of input preprocessing. -For DenseNet, call `keras_core.applications.densenet.preprocess_input` +For DenseNet, call `keras.applications.densenet.preprocess_input` on your inputs before passing them to the model. 
Args: diff --git a/keras_core/applications/efficientnet.py b/keras/applications/efficientnet.py similarity index 93% rename from keras_core/applications/efficientnet.py rename to keras/applications/efficientnet.py index 471e1d8fb..eae679b5b 100644 --- a/keras_core/applications/efficientnet.py +++ b/keras/applications/efficientnet.py @@ -1,13 +1,13 @@ import copy import math -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = "https://storage.googleapis.com/keras-applications/" @@ -157,7 +157,7 @@ https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. For EfficientNet, input preprocessing is included as part of the model (as a `Rescaling` layer), and thus -`keras_core.applications.efficientnet.preprocess_input` is actually a +`keras.applications.efficientnet.preprocess_input` is actually a pass-through function. EfficientNet models expect their inputs to be float tensors of pixels with values in the `[0-255]` range. 
@@ -549,10 +549,10 @@ def block( return x -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB0", - "keras_core.applications.EfficientNetB0", + "keras.applications.efficientnet.EfficientNetB0", + "keras.applications.EfficientNetB0", ] ) def EfficientNetB0( @@ -582,10 +582,10 @@ def EfficientNetB0( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB1", - "keras_core.applications.EfficientNetB1", + "keras.applications.efficientnet.EfficientNetB1", + "keras.applications.EfficientNetB1", ] ) def EfficientNetB1( @@ -615,10 +615,10 @@ def EfficientNetB1( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB2", - "keras_core.applications.EfficientNetB2", + "keras.applications.efficientnet.EfficientNetB2", + "keras.applications.EfficientNetB2", ] ) def EfficientNetB2( @@ -648,10 +648,10 @@ def EfficientNetB2( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB3", - "keras_core.applications.EfficientNetB3", + "keras.applications.efficientnet.EfficientNetB3", + "keras.applications.EfficientNetB3", ] ) def EfficientNetB3( @@ -681,10 +681,10 @@ def EfficientNetB3( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB4", - "keras_core.applications.EfficientNetB4", + "keras.applications.efficientnet.EfficientNetB4", + "keras.applications.EfficientNetB4", ] ) def EfficientNetB4( @@ -714,10 +714,10 @@ def EfficientNetB4( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB5", - "keras_core.applications.EfficientNetB5", + "keras.applications.efficientnet.EfficientNetB5", + "keras.applications.EfficientNetB5", ] ) def EfficientNetB5( @@ -747,10 +747,10 @@ def EfficientNetB5( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB6", - "keras_core.applications.EfficientNetB6", + 
"keras.applications.efficientnet.EfficientNetB6", + "keras.applications.EfficientNetB6", ] ) def EfficientNetB6( @@ -780,10 +780,10 @@ def EfficientNetB6( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet.EfficientNetB7", - "keras_core.applications.EfficientNetB7", + "keras.applications.efficientnet.EfficientNetB7", + "keras.applications.EfficientNetB7", ] ) def EfficientNetB7( @@ -823,7 +823,7 @@ EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB6") EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB7") -@keras_core_export("keras_core.applications.efficientnet.preprocess_input") +@keras_export("keras.applications.efficientnet.preprocess_input") def preprocess_input(x, data_format=None): """A placeholder method for backward compatibility. @@ -835,7 +835,7 @@ def preprocess_input(x, data_format=None): Args: x: A floating point `numpy.array` or a tensor. data_format: Optional data format of the image tensor/array. `None` - means the global setting `keras_core.backend.image_data_format()` + means the global setting `keras.backend.image_data_format()` is used (unless you changed it, it uses `"channels_last"`). Defaults to `None`. 
@@ -845,7 +845,7 @@ def preprocess_input(x, data_format=None): return x -@keras_core_export("keras_core.applications.efficientnet.decode_predictions") +@keras_export("keras.applications.efficientnet.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/efficientnet_v2.py b/keras/applications/efficientnet_v2.py similarity index 96% rename from keras_core/applications/efficientnet_v2.py rename to keras/applications/efficientnet_v2.py index 011223edf..3b798ec62 100644 --- a/keras_core/applications/efficientnet_v2.py +++ b/keras/applications/efficientnet_v2.py @@ -1,14 +1,14 @@ import copy import math -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import initializers +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/" # noqa: E501 @@ -538,7 +538,7 @@ https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. For EfficientNetV2, by default input preprocessing is included as a part of the model (as a `Rescaling` layer), and thus -`keras_core.applications.efficientnet_v2.preprocess_input` is actually a +`keras.applications.efficientnet_v2.preprocess_input` is actually a pass-through function. In this use case, EfficientNetV2 models expect their inputs to be float tensors of pixels with values in the `[0, 255]` range. 
At the same time, preprocessing as a part of the model (i.e. `Rescaling` @@ -1081,10 +1081,10 @@ def EfficientNetV2( return model -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2B0", - "keras_core.applications.EfficientNetV2B0", + "keras.applications.efficientnet_v2.EfficientNetV2B0", + "keras.applications.EfficientNetV2B0", ] ) def EfficientNetV2B0( @@ -1113,10 +1113,10 @@ def EfficientNetV2B0( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2B1", - "keras_core.applications.EfficientNetV2B1", + "keras.applications.efficientnet_v2.EfficientNetV2B1", + "keras.applications.EfficientNetV2B1", ] ) def EfficientNetV2B1( @@ -1145,10 +1145,10 @@ def EfficientNetV2B1( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2B2", - "keras_core.applications.EfficientNetV2B2", + "keras.applications.efficientnet_v2.EfficientNetV2B2", + "keras.applications.EfficientNetV2B2", ] ) def EfficientNetV2B2( @@ -1177,10 +1177,10 @@ def EfficientNetV2B2( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2B3", - "keras_core.applications.EfficientNetV2B3", + "keras.applications.efficientnet_v2.EfficientNetV2B3", + "keras.applications.EfficientNetV2B3", ] ) def EfficientNetV2B3( @@ -1209,10 +1209,10 @@ def EfficientNetV2B3( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2S", - "keras_core.applications.EfficientNetV2S", + "keras.applications.efficientnet_v2.EfficientNetV2S", + "keras.applications.EfficientNetV2S", ] ) def EfficientNetV2S( @@ -1241,10 +1241,10 @@ def EfficientNetV2S( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2M", - "keras_core.applications.EfficientNetV2M", + "keras.applications.efficientnet_v2.EfficientNetV2M", + "keras.applications.EfficientNetV2M", ] ) def EfficientNetV2M( @@ -1273,10 
+1273,10 @@ def EfficientNetV2M( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.efficientnet_v2.EfficientNetV2L", - "keras_core.applications.EfficientNetV2L", + "keras.applications.efficientnet_v2.EfficientNetV2L", + "keras.applications.EfficientNetV2L", ] ) def EfficientNetV2L( @@ -1314,7 +1314,7 @@ EfficientNetV2M.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2M") EfficientNetV2L.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2L") -@keras_core_export("keras_core.applications.efficientnet_v2.preprocess_input") +@keras_export("keras.applications.efficientnet_v2.preprocess_input") def preprocess_input(x, data_format=None): """A placeholder method for backward compatibility. @@ -1327,7 +1327,7 @@ def preprocess_input(x, data_format=None): x: A floating point `numpy.array` or a tensor. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting - `keras_core.backend.image_data_format()` is used + `keras.backend.image_data_format()` is used (unless you changed it, it defaults to "channels_last").{mode} Returns: @@ -1336,7 +1336,7 @@ def preprocess_input(x, data_format=None): return x -@keras_core_export("keras_core.applications.efficientnet_v2.decode_predictions") +@keras_export("keras.applications.efficientnet_v2.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/imagenet_utils.py b/keras/applications/imagenet_utils.py similarity index 96% rename from keras_core/applications/imagenet_utils.py rename to keras/applications/imagenet_utils.py index 187723de2..a3b266d2a 100644 --- a/keras_core/applications/imagenet_utils.py +++ b/keras/applications/imagenet_utils.py @@ -3,11 +3,11 @@ import warnings import numpy as np -from keras_core import activations -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.utils 
import file_utils +from keras import activations +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.utils import file_utils CLASS_INDEX = None CLASS_INDEX_PATH = ( @@ -22,12 +22,12 @@ PREPROCESS_INPUT_DOC = """ Usage example with `applications.MobileNet`: ```python - i = keras_core.layers.Input([None, None, 3], dtype="uint8") + i = keras.layers.Input([None, None, 3], dtype="uint8") x = ops.cast(i, "float32") - x = keras_core.applications.mobilenet.preprocess_input(x) - core = keras_core.applications.MobileNet() + x = keras.applications.mobilenet.preprocess_input(x) + core = keras.applications.MobileNet() x = core(x) - model = keras_core.Model(inputs=[i], outputs=[x]) + model = keras.Model(inputs=[i], outputs=[x]) result = model(image) ``` @@ -39,7 +39,7 @@ PREPROCESS_INPUT_DOC = """ if the data types are compatible. To avoid this behaviour, `numpy.copy(x)` can be used. data_format: Optional data format of the image tensor/array. None, means - the global setting `keras_core.backend.image_data_format()` is used + the global setting `keras.backend.image_data_format()` is used (unless you changed it, it uses "channels_last").{mode} Defaults to `None`. @@ -83,7 +83,7 @@ PREPROCESS_INPUT_RET_DOC_CAFFE = """ zero-centered with respect to the ImageNet dataset, without scaling.""" -@keras_core_export("keras_core.applications.imagenet_utils.preprocess_input") +@keras_export("keras.applications.imagenet_utils.preprocess_input") def preprocess_input(x, data_format=None, mode="caffe"): """Preprocesses a tensor or Numpy array encoding a batch of images.""" if mode not in {"caffe", "tf", "torch"}: @@ -113,7 +113,7 @@ preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format( ) -@keras_core_export("keras_core.applications.imagenet_utils.decode_predictions") +@keras_export("keras.applications.imagenet_utils.decode_predictions") def decode_predictions(preds, top=5): """Decodes the prediction of an ImageNet model. 
diff --git a/keras_core/applications/imagenet_utils_test.py b/keras/applications/imagenet_utils_test.py similarity index 98% rename from keras_core/applications/imagenet_utils_test.py rename to keras/applications/imagenet_utils_test.py index 6e61ccefb..ff0b8e71d 100644 --- a/keras_core/applications/imagenet_utils_test.py +++ b/keras/applications/imagenet_utils_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -import keras_core as keras -from keras_core import testing -from keras_core.applications import imagenet_utils as utils -from keras_core.mixed_precision import set_dtype_policy +import keras as keras +from keras import testing +from keras.applications import imagenet_utils as utils +from keras.mixed_precision import set_dtype_policy class TestImageNetUtils(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/applications/inception_resnet_v2.py b/keras/applications/inception_resnet_v2.py similarity index 95% rename from keras_core/applications/inception_resnet_v2.py rename to keras/applications/inception_resnet_v2.py index 6ce2501ba..b1e2bb9a3 100644 --- a/keras_core/applications/inception_resnet_v2.py +++ b/keras/applications/inception_resnet_v2.py @@ -1,11 +1,11 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.layers.layer import Layer -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.layers.layer import Layer +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHT_URL = ( "https://storage.googleapis.com/tensorflow/" @@ -13,10 +13,10 @@ BASE_WEIGHT_URL = ( ) 
-@keras_core_export( +@keras_export( [ - "keras_core.applications.inception_resnet_v2.InceptionResNetV2", - "keras_core.applications.InceptionResNetV2", + "keras.applications.inception_resnet_v2.InceptionResNetV2", + "keras.applications.InceptionResNetV2", ] ) def InceptionResNetV2( @@ -48,7 +48,7 @@ def InceptionResNetV2( Note: each Keras Application expects a specific kind of input preprocessing. For InceptionResNetV2, call - `keras_core.applications.inception_resnet_v2.preprocess_input` + `keras.applications.inception_resnet_v2.preprocess_input` on your inputs before passing them to the model. `inception_resnet_v2.preprocess_input` will scale input pixels between -1 and 1. @@ -374,8 +374,8 @@ def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): return x -@keras_core_export( - "keras_core.applications.inception_resnet_v2.preprocess_input" +@keras_export( + "keras.applications.inception_resnet_v2.preprocess_input" ) def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( @@ -383,8 +383,8 @@ def preprocess_input(x, data_format=None): ) -@keras_core_export( - "keras_core.applications.inception_resnet_v2.decode_predictions" +@keras_export( + "keras.applications.inception_resnet_v2.decode_predictions" ) def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/inception_v3.py b/keras/applications/inception_v3.py similarity index 95% rename from keras_core/applications/inception_v3.py rename to keras/applications/inception_v3.py index b598d2ddc..beab28150 100644 --- a/keras_core/applications/inception_v3.py +++ b/keras/applications/inception_v3.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import 
file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" @@ -16,10 +16,10 @@ WEIGHTS_PATH_NO_TOP = ( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.inception_v3.InceptionV3", - "keras_core.applications.InceptionV3", + "keras.applications.inception_v3.InceptionV3", + "keras.applications.InceptionV3", ] ) def InceptionV3( @@ -50,7 +50,7 @@ def InceptionV3( Note: each Keras Application expects a specific kind of input preprocessing. For `InceptionV3`, call - `keras_core.applications.inception_v3.preprocess_input` on your inputs + `keras.applications.inception_v3.preprocess_input` on your inputs before passing them to the model. `inception_v3.preprocess_input` will scale input pixels between -1 and 1. 
@@ -420,14 +420,14 @@ def conv2d_bn( return x -@keras_core_export("keras_core.applications.inception_v3.preprocess_input") +@keras_export("keras.applications.inception_v3.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.inception_v3.decode_predictions") +@keras_export("keras.applications.inception_v3.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/mobilenet.py b/keras/applications/mobilenet.py similarity index 96% rename from keras_core/applications/mobilenet.py rename to keras/applications/mobilenet.py index 3bf7eeeda..32b067ec8 100644 --- a/keras_core/applications/mobilenet.py +++ b/keras/applications/mobilenet.py @@ -1,22 +1,22 @@ import warnings -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/" ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.mobilenet.MobileNet", - "keras_core.applications.MobileNet", + "keras.applications.mobilenet.MobileNet", + "keras.applications.MobileNet", ] ) def MobileNet( @@ -50,7 +50,7 @@ def MobileNet( https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. 
- For MobileNet, call `keras_core.applications.mobilenet.preprocess_input` + For MobileNet, call `keras.applications.mobilenet.preprocess_input` on your inputs before passing them to the model. `mobilenet.preprocess_input` will scale input pixels between -1 and 1. @@ -408,14 +408,14 @@ def _depthwise_conv_block( return layers.ReLU(6.0, name="conv_pw_%d_relu" % block_id)(x) -@keras_core_export("keras_core.applications.mobilenet.preprocess_input") +@keras_export("keras.applications.mobilenet.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.mobilenet.decode_predictions") +@keras_export("keras.applications.mobilenet.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/mobilenet_v2.py b/keras/applications/mobilenet_v2.py similarity index 96% rename from keras_core/applications/mobilenet_v2.py rename to keras/applications/mobilenet_v2.py index 3b35fda8f..96db08c32 100644 --- a/keras_core/applications/mobilenet_v2.py +++ b/keras/applications/mobilenet_v2.py @@ -1,22 +1,22 @@ import warnings -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/" ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.mobilenet_v2.MobileNetV2", - 
"keras_core.applications.MobileNetV2", + "keras.applications.mobilenet_v2.MobileNetV2", + "keras.applications.MobileNetV2", ] ) def MobileNetV2( @@ -56,7 +56,7 @@ def MobileNetV2( Note: each Keras Application expects a specific kind of input preprocessing. For MobileNetV2, call - `keras_core.applications.mobilenet_v2.preprocess_input` + `keras.applications.mobilenet_v2.preprocess_input` on your inputs before passing them to the model. `mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1. @@ -480,14 +480,14 @@ def _make_divisible(v, divisor, min_value=None): return new_v -@keras_core_export("keras_core.applications.mobilenet_v2.preprocess_input") +@keras_export("keras.applications.mobilenet_v2.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.mobilenet_v2.decode_predictions") +@keras_export("keras.applications.mobilenet_v2.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/mobilenet_v3.py b/keras/applications/mobilenet_v3.py similarity index 97% rename from keras_core/applications/mobilenet_v3.py rename to keras/applications/mobilenet_v3.py index 335dd90bc..3126b3230 100644 --- a/keras_core/applications/mobilenet_v3.py +++ b/keras/applications/mobilenet_v3.py @@ -1,12 +1,12 @@ import warnings -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from 
keras.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/" @@ -69,7 +69,7 @@ https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. For MobileNetV3, by default input preprocessing is included as a part of the model (as a `Rescaling` layer), and thus -`keras_core.applications.mobilenet_v3.preprocess_input` is actually a +`keras.applications.mobilenet_v3.preprocess_input` is actually a pass-through function. In this use case, MobileNetV3 models expect their inputs to be float tensors of pixels with values in the `[0-255]` range. At the same time, preprocessing as a part of the model (i.e. `Rescaling` @@ -396,7 +396,7 @@ def MobileNetV3( return model -@keras_core_export("keras_core.applications.MobileNetV3Small") +@keras_export("keras.applications.MobileNetV3Small") def MobileNetV3Small( input_shape=None, alpha=1.0, @@ -461,7 +461,7 @@ def MobileNetV3Small( ) -@keras_core_export("keras_core.applications.MobileNetV3Large") +@keras_export("keras.applications.MobileNetV3Large") def MobileNetV3Large( input_shape=None, alpha=1.0, @@ -646,7 +646,7 @@ def _inverted_res_block( return x -@keras_core_export("keras_core.applications.mobilenet_v3.preprocess_input") +@keras_export("keras.applications.mobilenet_v3.preprocess_input") def preprocess_input(x, data_format=None): """A placeholder method for backward compatibility. @@ -659,7 +659,7 @@ def preprocess_input(x, data_format=None): x: A floating point `numpy.array` or a tensor. data_format: Optional data format of the image tensor/array. `None` means the global setting - `keras_core.config.image_data_format()` is used + `keras.config.image_data_format()` is used (unless you changed it, it uses `"channels_last"`). Defaults to `None`. 
@@ -669,7 +669,7 @@ def preprocess_input(x, data_format=None): return x -@keras_core_export("keras_core.applications.mobilenet_v3.decode_predictions") +@keras_export("keras.applications.mobilenet_v3.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/nasnet.py b/keras/applications/nasnet.py similarity index 97% rename from keras_core/applications/nasnet.py rename to keras/applications/nasnet.py index d968756b9..f971a278a 100644 --- a/keras_core/applications/nasnet.py +++ b/keras/applications/nasnet.py @@ -1,12 +1,12 @@ import warnings -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/nasnet/" @@ -47,7 +47,7 @@ def NASNet( https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. - For NasNet, call `keras_core.applications.nasnet.preprocess_input` + For NasNet, call `keras.applications.nasnet.preprocess_input` on your inputs before passing them to the model. `nasnet.preprocess_input` will scale input pixels between -1 and 1. 
@@ -311,10 +311,10 @@ def NASNet( return model -@keras_core_export( +@keras_export( [ - "keras_core.applications.nasnet.NASNetMobile", - "keras_core.applications.NASNetMobile", + "keras.applications.nasnet.NASNetMobile", + "keras.applications.NASNetMobile", ] ) def NASNetMobile( @@ -337,7 +337,7 @@ def NASNetMobile( the one specified in your Keras config at `~/.keras/keras.json`. Note: each Keras Application expects a specific kind of input preprocessing. - For NASNet, call `keras_core.applications.nasnet.preprocess_input` on your + For NASNet, call `keras.applications.nasnet.preprocess_input` on your inputs before passing them to the model. Args: @@ -402,10 +402,10 @@ def NASNetMobile( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.nasnet.NASNetLarge", - "keras_core.applications.NASNetLarge", + "keras.applications.nasnet.NASNetLarge", + "keras.applications.NASNetLarge", ] ) def NASNetLarge( @@ -428,7 +428,7 @@ def NASNetLarge( the one specified in your Keras config at `~/.keras/keras.json`. Note: each Keras Application expects a specific kind of input preprocessing. - For NASNet, call `keras_core.applications.nasnet.preprocess_input` on your + For NASNet, call `keras.applications.nasnet.preprocess_input` on your inputs before passing them to the model. 
Args: @@ -841,14 +841,14 @@ def _reduction_a_cell(ip, p, filters, block_id=None): return x, ip -@keras_core_export("keras_core.applications.nasnet.preprocess_input") +@keras_export("keras.applications.nasnet.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.nasnet.decode_predictions") +@keras_export("keras.applications.nasnet.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/resnet.py b/keras/applications/resnet.py similarity index 94% rename from keras_core/applications/resnet.py rename to keras/applications/resnet.py index 51fb32bbe..a5230e2da 100644 --- a/keras_core/applications/resnet.py +++ b/keras/applications/resnet.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/resnet/" @@ -379,11 +379,11 @@ def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None): return x -@keras_core_export( +@keras_export( [ - "keras_core.applications.resnet50.ResNet50", - "keras_core.applications.resnet.ResNet50", - "keras_core.applications.ResNet50", + "keras.applications.resnet50.ResNet50", + "keras.applications.resnet.ResNet50", + "keras.applications.ResNet50", ] ) def ResNet50( @@ -418,10 +418,10 @@ def ResNet50( ) -@keras_core_export( 
+@keras_export( [ - "keras_core.applications.resnet.ResNet101", - "keras_core.applications.ResNet101", + "keras.applications.resnet.ResNet101", + "keras.applications.ResNet101", ] ) def ResNet101( @@ -456,10 +456,10 @@ def ResNet101( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.resnet.ResNet152", - "keras_core.applications.ResNet152", + "keras.applications.resnet.ResNet152", + "keras.applications.ResNet152", ] ) def ResNet152( @@ -494,10 +494,10 @@ def ResNet152( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.resnet50.preprocess_input", - "keras_core.applications.resnet.preprocess_input", + "keras.applications.resnet50.preprocess_input", + "keras.applications.resnet.preprocess_input", ] ) def preprocess_input(x, data_format=None): @@ -506,10 +506,10 @@ def preprocess_input(x, data_format=None): ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.resnet50.decode_predictions", - "keras_core.applications.resnet.decode_predictions", + "keras.applications.resnet50.decode_predictions", + "keras.applications.resnet.decode_predictions", ] ) def decode_predictions(preds, top=5): @@ -537,7 +537,7 @@ For transfer learning use cases, make sure to read the https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. -For ResNet, call `keras_core.applications.resnet.preprocess_input` on your +For ResNet, call `keras.applications.resnet.preprocess_input` on your inputs before passing them to the model. `resnet.preprocess_input` will convert the input images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. 
diff --git a/keras_core/applications/resnet_v2.py b/keras/applications/resnet_v2.py similarity index 88% rename from keras_core/applications/resnet_v2.py rename to keras/applications/resnet_v2.py index 64fc286d3..21652ffc1 100644 --- a/keras_core/applications/resnet_v2.py +++ b/keras/applications/resnet_v2.py @@ -1,12 +1,12 @@ -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.applications import resnet +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.applications import resnet -@keras_core_export( +@keras_export( [ - "keras_core.applications.ResNet50V2", - "keras_core.applications.resnet_v2.ResNet50V2", + "keras.applications.ResNet50V2", + "keras.applications.resnet_v2.ResNet50V2", ] ) def ResNet50V2( @@ -43,10 +43,10 @@ def ResNet50V2( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.ResNet101V2", - "keras_core.applications.resnet_v2.ResNet101V2", + "keras.applications.ResNet101V2", + "keras.applications.resnet_v2.ResNet101V2", ] ) def ResNet101V2( @@ -83,10 +83,10 @@ def ResNet101V2( ) -@keras_core_export( +@keras_export( [ - "keras_core.applications.ResNet152V2", - "keras_core.applications.resnet_v2.ResNet152V2", + "keras.applications.ResNet152V2", + "keras.applications.resnet_v2.ResNet152V2", ] ) def ResNet152V2( @@ -123,14 +123,14 @@ def ResNet152V2( ) -@keras_core_export("keras_core.applications.resnet_v2.preprocess_input") +@keras_export("keras.applications.resnet_v2.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.resnet_v2.decode_predictions") +@keras_export("keras.applications.resnet_v2.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) @@ -157,7 +157,7 @@ For transfer learning use cases, make sure to read the 
https://keras.io/guides/transfer_learning/). Note: each Keras Application expects a specific kind of input preprocessing. -For ResNet, call `keras_core.applications.resnet_v2.preprocess_input` on your +For ResNet, call `keras.applications.resnet_v2.preprocess_input` on your inputs before passing them to the model. `resnet_v2.preprocess_input` will scale input pixels between -1 and 1. diff --git a/keras_core/applications/vgg16.py b/keras/applications/vgg16.py similarity index 93% rename from keras_core/applications/vgg16.py rename to keras/applications/vgg16.py index d26037d4a..822ddec97 100644 --- a/keras_core/applications/vgg16.py +++ b/keras/applications/vgg16.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" @@ -17,8 +17,8 @@ WEIGHTS_PATH_NO_TOP = ( ) -@keras_core_export( - ["keras_core.applications.vgg16.VGG16", "keras_core.applications.VGG16"] +@keras_export( + ["keras.applications.vgg16.VGG16", "keras.applications.VGG16"] ) def VGG16( include_top=True, @@ -46,7 +46,7 @@ def VGG16( The default input size for this model is 224x224. Note: each Keras Application expects a specific kind of input preprocessing. - For VGG16, call `keras_core.applications.vgg16.preprocess_input` on your + For VGG16, call `keras.applications.vgg16.preprocess_input` on your inputs before passing them to the model. 
`vgg16.preprocess_input` will convert the input images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet @@ -228,14 +228,14 @@ def VGG16( return model -@keras_core_export("keras_core.applications.vgg16.preprocess_input") +@keras_export("keras.applications.vgg16.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="caffe" ) -@keras_core_export("keras_core.applications.vgg16.decode_predictions") +@keras_export("keras.applications.vgg16.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/vgg19.py b/keras/applications/vgg19.py similarity index 93% rename from keras_core/applications/vgg19.py rename to keras/applications/vgg19.py index 1dbb37336..275917fb8 100644 --- a/keras_core/applications/vgg19.py +++ b/keras/applications/vgg19.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" @@ -17,8 +17,8 @@ WEIGHTS_PATH_NO_TOP = ( ) -@keras_core_export( - ["keras_core.applications.vgg19.VGG19", "keras_core.applications.VGG19"] +@keras_export( + ["keras.applications.vgg19.VGG19", "keras.applications.VGG19"] ) def VGG19( include_top=True, @@ -46,7 +46,7 @@ def VGG19( The default input size for this model is 224x224. 
Note: each Keras Application expects a specific kind of input preprocessing. - For VGG19, call `keras_core.applications.vgg19.preprocess_input` on your + For VGG19, call `keras.applications.vgg19.preprocess_input` on your inputs before passing them to the model. `vgg19.preprocess_input` will convert the input images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet @@ -236,14 +236,14 @@ def VGG19( return model -@keras_core_export("keras_core.applications.vgg19.preprocess_input") +@keras_export("keras.applications.vgg19.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="caffe" ) -@keras_core_export("keras_core.applications.vgg19.decode_predictions") +@keras_export("keras.applications.vgg19.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras_core/applications/xception.py b/keras/applications/xception.py similarity index 95% rename from keras_core/applications/xception.py rename to keras/applications/xception.py index bfdea1850..93d0a4ac7 100644 --- a/keras_core/applications/xception.py +++ b/keras/applications/xception.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.applications import imagenet_utils -from keras_core.models import Functional -from keras_core.ops import operation_utils -from keras_core.utils import file_utils +from keras import backend +from keras import layers +from keras.api_export import keras_export +from keras.applications import imagenet_utils +from keras.models import Functional +from keras.ops import operation_utils +from keras.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" @@ -16,10 +16,10 @@ WEIGHTS_PATH_NO_TOP = ( ) -@keras_core_export( +@keras_export( [ - 
"keras_core.applications.xception.Xception", - "keras_core.applications.Xception", + "keras.applications.xception.Xception", + "keras.applications.Xception", ] ) def Xception( @@ -48,7 +48,7 @@ def Xception( The default input image size for this model is 299x299. Note: each Keras Application expects a specific kind of input preprocessing. - For Xception, call `keras_core.applications.xception.preprocess_input` + For Xception, call `keras.applications.xception.preprocess_input` on your inputs before passing them to the model. `xception.preprocess_input` will scale input pixels between -1 and 1. @@ -333,14 +333,14 @@ def Xception( return model -@keras_core_export("keras_core.applications.xception.preprocess_input") +@keras_export("keras.applications.xception.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) -@keras_core_export("keras_core.applications.xception.decode_predictions") +@keras_export("keras.applications.xception.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) diff --git a/keras/backend/__init__.py b/keras/backend/__init__.py new file mode 100644 index 000000000..ebc55f82a --- /dev/null +++ b/keras/backend/__init__.py @@ -0,0 +1,55 @@ +from keras.backend.config import backend + +if backend() == "torch": + # When using the torch backend, + # torch needs to be imported first, otherwise it will segfault + # upon import. 
+ import torch + +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.common.keras_tensor import any_symbolic_tensors +from keras.backend.common.keras_tensor import is_keras_tensor +from keras.backend.common.name_scope import name_scope +from keras.backend.common.stateless_scope import StatelessScope +from keras.backend.common.stateless_scope import get_stateless_scope +from keras.backend.common.stateless_scope import in_stateless_scope +from keras.backend.common.variables import AutocastScope +from keras.backend.common.variables import get_autocast_scope +from keras.backend.common.variables import is_float_dtype +from keras.backend.common.variables import is_int_dtype +from keras.backend.common.variables import standardize_dtype +from keras.backend.common.variables import standardize_shape +from keras.backend.config import epsilon +from keras.backend.config import floatx +from keras.backend.config import image_data_format +from keras.backend.config import set_epsilon +from keras.backend.config import set_floatx +from keras.backend.config import set_image_data_format +from keras.backend.config import standardize_data_format +from keras.utils.io_utils import print_msg + +# Import backend functions. +if backend() == "tensorflow": + print_msg("Using TensorFlow backend") + from keras.backend.tensorflow import * # noqa: F403 + + distribution_lib = None +elif backend() == "jax": + print_msg("Using JAX backend.") + from keras.backend.jax import * # noqa: F403 +elif backend() == "torch": + print_msg("Using PyTorch backend.") + from keras.backend.torch import * # noqa: F403 + + distribution_lib = None +elif backend() == "numpy": + print_msg( + "Using NumPy backend.\nThe NumPy backend does not support " + "training. It should only be used for inference, evaluation, " + "and debugging." 
+ ) + from keras.backend.numpy import * # noqa: F403 + + distribution_lib = None +else: + raise ValueError(f"Unable to import backend : {backend()}") diff --git a/keras/backend/common/__init__.py b/keras/backend/common/__init__.py new file mode 100644 index 000000000..858e41cc4 --- /dev/null +++ b/keras/backend/common/__init__.py @@ -0,0 +1,9 @@ +from keras.backend.common import backend_utils +from keras.backend.common.variables import AutocastScope +from keras.backend.common.variables import KerasVariable +from keras.backend.common.variables import get_autocast_scope +from keras.backend.common.variables import is_float_dtype +from keras.backend.common.variables import is_int_dtype +from keras.backend.common.variables import standardize_dtype +from keras.backend.common.variables import standardize_shape +from keras.random import random diff --git a/keras_core/backend/common/backend_utils.py b/keras/backend/common/backend_utils.py similarity index 100% rename from keras_core/backend/common/backend_utils.py rename to keras/backend/common/backend_utils.py diff --git a/keras_core/backend/common/backend_utils_test.py b/keras/backend/common/backend_utils_test.py similarity index 96% rename from keras_core/backend/common/backend_utils_test.py rename to keras/backend/common/backend_utils_test.py index 242c54d15..3c737f4ba 100644 --- a/keras_core/backend/common/backend_utils_test.py +++ b/keras/backend/common/backend_utils_test.py @@ -1,19 +1,19 @@ -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_jax, ) -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_torch, ) -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( _get_output_shape_given_tf_padding, ) -from keras_core.backend.common.backend_utils import ( +from 
keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_torch, ) -from keras_core.testing import test_case +from keras.testing import test_case class ConvertConvTransposePaddingArgsJAXTest(test_case.TestCase): diff --git a/keras_core/backend/common/compute_output_spec_test.py b/keras/backend/common/compute_output_spec_test.py similarity index 97% rename from keras_core/backend/common/compute_output_spec_test.py rename to keras/backend/common/compute_output_spec_test.py index 96cbb3efa..def1a942c 100644 --- a/keras_core/backend/common/compute_output_spec_test.py +++ b/keras/backend/common/compute_output_spec_test.py @@ -1,7 +1,7 @@ import pytest -from keras_core import backend -from keras_core import testing +from keras import backend +from keras import testing def example_fn(x): diff --git a/keras_core/backend/common/global_state.py b/keras/backend/common/global_state.py similarity index 75% rename from keras_core/backend/common/global_state.py rename to keras/backend/common/global_state.py index 2d51e5d39..6b5711b58 100644 --- a/keras_core/backend/common/global_state.py +++ b/keras/backend/common/global_state.py @@ -1,7 +1,7 @@ import threading -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export GLOBAL_STATE_TRACKER = threading.local() GLOBAL_SETTINGS_TRACKER = threading.local() @@ -20,8 +20,8 @@ def get_global_attribute(name, default=None, set_to_default=False): return attr -@keras_core_export( - ["keras_core.utils.clear_session", "keras_core.backend.clear_session"] +@keras_export( + ["keras.utils.clear_session", "keras.backend.clear_session"] ) def clear_session(): """Resets all state generated by Keras. 
@@ -40,26 +40,26 @@ def clear_session(): for _ in range(100): # Without `clear_session()`, each iteration of this loop will # slightly increase the size of the global state managed by Keras - model = keras_core.Sequential([ - keras_core.layers.Dense(10) for _ in range(10)]) + model = keras.Sequential([ + keras.layers.Dense(10) for _ in range(10)]) for _ in range(100): # With `clear_session()` called at the beginning, # Keras starts with a blank state at each iteration # and memory consumption is constant over time. - keras_core.backend.clear_session() - model = keras_core.Sequential([ - keras_core.layers.Dense(10) for _ in range(10)]) + keras.backend.clear_session() + model = keras.Sequential([ + keras.layers.Dense(10) for _ in range(10)]) ``` Example 2: resetting the layer name generation counter - >>> layers = [keras_core.layers.Dense(10) for _ in range(10)] - >>> new_layer = keras_core.layers.Dense(10) + >>> layers = [keras.layers.Dense(10) for _ in range(10)] + >>> new_layer = keras.layers.Dense(10) >>> print(new_layer.name) dense_10 - >>> keras_core.backend.clear_session() - >>> new_layer = keras_core.layers.Dense(10) + >>> keras.backend.clear_session() + >>> new_layer = keras.layers.Dense(10) >>> print(new_layer.name) dense """ @@ -70,7 +70,7 @@ def clear_session(): GLOBAL_SETTINGS_TRACKER = threading.local() if backend.backend() == "tensorflow": - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf tf.compat.v1.reset_default_graph() if tf.executing_eagerly(): diff --git a/keras_core/backend/common/global_state_test.py b/keras/backend/common/global_state_test.py similarity index 72% rename from keras_core/backend/common/global_state_test.py rename to keras/backend/common/global_state_test.py index fe6b14672..880886a7d 100644 --- a/keras_core/backend/common/global_state_test.py +++ b/keras/backend/common/global_state_test.py @@ -1,6 +1,6 @@ -from keras_core.backend.common import global_state -from 
keras_core.testing import test_case -from keras_core.utils.naming import auto_name +from keras.backend.common import global_state +from keras.testing import test_case +from keras.utils.naming import auto_name class GlobalStateTest(test_case.TestCase): diff --git a/keras_core/backend/common/keras_tensor.py b/keras/backend/common/keras_tensor.py similarity index 78% rename from keras_core/backend/common/keras_tensor.py rename to keras/backend/common/keras_tensor.py index 9c7131253..430becd4b 100644 --- a/keras_core/backend/common/keras_tensor.py +++ b/keras/backend/common/keras_tensor.py @@ -1,20 +1,20 @@ import tree -from keras_core.api_export import keras_core_export -from keras_core.utils.naming import auto_name +from keras.api_export import keras_export +from keras.utils.naming import auto_name -@keras_core_export("keras_core.KerasTensor") +@keras_export("keras.KerasTensor") class KerasTensor: """Symbolic tensor -- encapsulates a shape and a dtype. You can use `KerasTensor` instances to build computation - graphs of Keras operations, such as `keras_core.Function` - objects or Functional `keras_core.models.Model` objects. + graphs of Keras operations, such as `keras.Function` + objects or Functional `keras.models.Model` objects. 
Example: - >>> x = keras_core.KerasTensor(shape=(3, 4), dtype="float32") + >>> x = keras.KerasTensor(shape=(3, 4), dtype="float32") >>> x.shape (3, 4) >>> x.dtype @@ -36,7 +36,7 @@ class KerasTensor: record_history=True, name=None, ): - from keras_core import backend + from keras import backend self.shape = backend.standardize_shape(shape) self.dtype = backend.standardize_dtype(dtype) @@ -49,12 +49,12 @@ class KerasTensor: return len(self.shape) def reshape(self, new_shape): - from keras_core import ops + from keras import ops return ops.Reshape(new_shape)(self) def squeeze(self, axis=None): - from keras_core import ops + from keras import ops return ops.Squeeze(axis)(self) @@ -71,8 +71,8 @@ class KerasTensor: "A KerasTensor is a symbolic placeholder for a shape and dtype, " "used when constructing Keras Functional models " "or Keras Functions. You can only use it as input to a Keras layer " - "or a Keras operation (from the namespaces `keras_core.layers` " - "and `keras_core.operations`). " + "or a Keras operation (from the namespaces `keras.layers` " + "and `keras.operations`). " "You are likely doing something like:\n\n" "```\n" "x = Input(...)\n" @@ -94,8 +94,8 @@ class KerasTensor: "A KerasTensor is a symbolic placeholder for a shape and dtype, " "used when constructing Keras Functional models " "or Keras Functions. You can only use it as input to a Keras layer " - "or a Keras operation (from the namespaces `keras_core.layers` " - "and `keras_core.operations`). " + "or a Keras operation (from the namespaces `keras.layers` " + "and `keras.operations`). 
" "You are likely doing something like:\n\n" "```\n" "x = Input(...)\n" @@ -126,167 +126,167 @@ class KerasTensor: raise TypeError("A symbolic KerasTensor cannot be used as a boolean.") def __add__(self, other): - from keras_core import ops + from keras import ops return ops.Add().symbolic_call(self, other) def __radd__(self, other): - from keras_core import ops + from keras import ops return ops.Add().symbolic_call(other, self) def __sub__(self, other): - from keras_core import ops + from keras import ops return ops.Subtract().symbolic_call(self, other) def __rsub__(self, other): - from keras_core import ops + from keras import ops return ops.Subtract().symbolic_call(other, self) def __mul__(self, other): - from keras_core import ops + from keras import ops return ops.Multiply().symbolic_call(self, other) def __rmul__(self, other): - from keras_core import ops + from keras import ops return ops.Multiply().symbolic_call(other, self) def __matmul__(self, other): - from keras_core import ops + from keras import ops return ops.Matmul().symbolic_call(self, other) def __rmatmul__(self, other): - from keras_core import ops + from keras import ops return ops.Matmul().symbolic_call(other, self) def __div__(self, other): - from keras_core import ops + from keras import ops return ops.Divide().symbolic_call(self, other) def __rdiv__(self, other): - from keras_core import ops + from keras import ops return ops.Divide().symbolic_call(other, self) def __truediv__(self, other): - from keras_core import ops + from keras import ops return ops.TrueDivide().symbolic_call(self, other) def __rtruediv__(self, other): - from keras_core import ops + from keras import ops return ops.TrueDivide().symbolic_call(other, self) def __neg__(self): - from keras_core import ops + from keras import ops return ops.Negative().symbolic_call(self) def __abs__(self): - from keras_core import ops + from keras import ops return ops.Absolute().symbolic_call(self) def __pow__(self, other): - from keras_core 
import ops + from keras import ops return ops.Power().symbolic_call(self, other) def __rpow__(self, other): - from keras_core import ops + from keras import ops return ops.Power().symbolic_call(other, self) def __floordiv__(self, other): - from keras_core import ops + from keras import ops return ops.FloorDiv().symbolic_call(self, other) def __rfloordiv__(self, other): - from keras_core import ops + from keras import ops return ops.FloorDiv().symbolic_call(other, self) def __mod__(self, other): - from keras_core import ops + from keras import ops return ops.Mod().symbolic_call(self, other) def __rmod__(self, other): - from keras_core import ops + from keras import ops return ops.Mod().symbolic_call(other, self) def __lt__(self, other): - from keras_core import ops + from keras import ops return ops.Less().symbolic_call(self, other) def __le__(self, other): - from keras_core import ops + from keras import ops return ops.LessEqual().symbolic_call(self, other) def __gt__(self, other): - from keras_core import ops + from keras import ops return ops.Greater().symbolic_call(self, other) def __ge__(self, other): - from keras_core import ops + from keras import ops return ops.GreaterEqual().symbolic_call(self, other) def __ne__(self, other): - from keras_core import ops + from keras import ops return ops.NotEqual().symbolic_call(self, other) def __and__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalAnd().symbolic_call(self, other) def __rand__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalAnd().symbolic_call(other, self) def __or__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalOr().symbolic_call(self, other) def __ror__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalOr().symbolic_call(other, self) def __invert__(self, other): - from keras_core import ops + from keras import ops return 
ops.LogicalNot().symbolic_call(other, self) def __xor__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalXor().symbolic_call(self, other) def __rxor__(self, other): - from keras_core import ops + from keras import ops return ops.LogicalXor().symbolic_call(other, self) def __getitem__(self, key): - from keras_core import ops + from keras import ops return ops.GetItem().symbolic_call(self, key) @@ -300,8 +300,8 @@ def any_symbolic_tensors(args=None, kwargs=None): return False -@keras_core_export( - ["keras_core.utils.is_keras_tensor", "keras_core.backend.is_keras_tensor"] +@keras_export( + ["keras.utils.is_keras_tensor", "keras.backend.is_keras_tensor"] ) def is_keras_tensor(x): """Returns whether `x` is a Keras tensor. diff --git a/keras_core/backend/common/keras_tensor_test.py b/keras/backend/common/keras_tensor_test.py similarity index 87% rename from keras_core/backend/common/keras_tensor_test.py rename to keras/backend/common/keras_tensor_test.py index f860d3ec6..19a17bd8b 100644 --- a/keras_core/backend/common/keras_tensor_test.py +++ b/keras/backend/common/keras_tensor_test.py @@ -4,10 +4,10 @@ from unittest.mock import patch import numpy as np import tensorflow as tf -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.backend.common import keras_tensor +from keras import backend +from keras import ops +from keras import testing +from keras.backend.common import keras_tensor class KerasTensorTest(testing.TestCase): @@ -80,7 +80,7 @@ class KerasTensorTest(testing.TestCase): y = np.array([1, 2, 3]) self.assertFalse(keras_tensor.is_keras_tensor(y)) - @patch("keras_core.ops.Absolute.symbolic_call") + @patch("keras.ops.Absolute.symbolic_call") def test_abs_method(self, mock_symbolic_call): mock_tensor = Mock() mock_symbolic_call.return_value = mock_tensor @@ -89,51 +89,51 @@ class KerasTensorTest(testing.TestCase): mock_symbolic_call.assert_called_once_with(x) 
self.assertEqual(abs_x, mock_tensor) - @patch("keras_core.ops.Negative.symbolic_call") + @patch("keras.ops.Negative.symbolic_call") def test_neg_method(self, mock_method): self._test_unary_op_method(mock_method, lambda x: -x) - @patch("keras_core.ops.Subtract.symbolic_call") + @patch("keras.ops.Subtract.symbolic_call") def test_sub_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x - y) - @patch("keras_core.ops.Multiply.symbolic_call") + @patch("keras.ops.Multiply.symbolic_call") def test_mul_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x * y) - @patch("keras_core.ops.Matmul.symbolic_call") + @patch("keras.ops.Matmul.symbolic_call") def test_matmul_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x @ y) - @patch("keras_core.ops.Power.symbolic_call") + @patch("keras.ops.Power.symbolic_call") def test_pow_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x**y) - @patch("keras_core.ops.Mod.symbolic_call") + @patch("keras.ops.Mod.symbolic_call") def test_mod_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x % y) - @patch("keras_core.ops.Less.symbolic_call") + @patch("keras.ops.Less.symbolic_call") def test_lt_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x < y) - @patch("keras_core.ops.LogicalAnd.symbolic_call") + @patch("keras.ops.LogicalAnd.symbolic_call") def test_and_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x & y) - @patch("keras_core.ops.LogicalOr.symbolic_call") + @patch("keras.ops.LogicalOr.symbolic_call") def test_or_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x | y) - @patch("keras_core.ops.GetItem.symbolic_call") + @patch("keras.ops.GetItem.symbolic_call") def 
test_getitem_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x[y]) diff --git a/keras_core/backend/common/name_scope.py b/keras/backend/common/name_scope.py similarity index 97% rename from keras_core/backend/common/name_scope.py rename to keras/backend/common/name_scope.py index 72302ed6b..d42569b14 100644 --- a/keras_core/backend/common/name_scope.py +++ b/keras/backend/common/name_scope.py @@ -1,4 +1,4 @@ -from keras_core.backend.common import global_state +from keras.backend.common import global_state class name_scope: diff --git a/keras_core/backend/common/name_scope_test.py b/keras/backend/common/name_scope_test.py similarity index 90% rename from keras_core/backend/common/name_scope_test.py rename to keras/backend/common/name_scope_test.py index b13f8d73a..b9b3dc5c7 100644 --- a/keras_core/backend/common/name_scope_test.py +++ b/keras/backend/common/name_scope_test.py @@ -1,6 +1,6 @@ -from keras_core import testing -from keras_core.backend.common.name_scope import current_path -from keras_core.backend.common.name_scope import name_scope +from keras import testing +from keras.backend.common.name_scope import current_path +from keras.backend.common.name_scope import name_scope class NameScopeTest(testing.TestCase): diff --git a/keras_core/backend/common/stateless_scope.py b/keras/backend/common/stateless_scope.py similarity index 87% rename from keras_core/backend/common/stateless_scope.py rename to keras/backend/common/stateless_scope.py index bfe0a2084..56a072b78 100644 --- a/keras_core/backend/common/stateless_scope.py +++ b/keras/backend/common/stateless_scope.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras.api_export import keras_export +from keras.backend.common import global_state -@keras_core_export("keras_core.StatelessScope") +@keras_export("keras.StatelessScope") class StatelessScope: def __init__( self, @@ -10,8 
+10,8 @@ class StatelessScope: collect_losses=False, initialize_variables=True, ): - from keras_core import backend - from keras_core.backend.common.variables import KerasVariable + from keras import backend + from keras.backend.common.variables import KerasVariable self.collect_losses = collect_losses self.initialize_variables = initialize_variables @@ -59,7 +59,7 @@ class StatelessScope: # We're back in eager scope; # if any variables were created within the stateless # scope, we initialize them here. - from keras_core.backend.common.variables import ( + from keras.backend.common.variables import ( initialize_all_variables, ) diff --git a/keras_core/backend/common/stateless_scope_test.py b/keras/backend/common/stateless_scope_test.py similarity index 92% rename from keras_core/backend/common/stateless_scope_test.py rename to keras/backend/common/stateless_scope_test.py index 9b1e553b9..f3f917620 100644 --- a/keras_core/backend/common/stateless_scope_test.py +++ b/keras/backend/common/stateless_scope_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.backend.common.stateless_scope import StatelessScope +from keras import backend +from keras import ops +from keras import testing +from keras.backend.common.stateless_scope import StatelessScope class TestStatelessScope(testing.TestCase): diff --git a/keras_core/backend/common/variables.py b/keras/backend/common/variables.py similarity index 96% rename from keras_core/backend/common/variables.py rename to keras/backend/common/variables.py index 0c2126b76..8d9e95d48 100644 --- a/keras_core/backend/common/variables.py +++ b/keras/backend/common/variables.py @@ -1,12 +1,12 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.backend import config -from keras_core.backend.common import global_state -from keras_core.backend.common.name_scope import current_path -from 
keras_core.backend.common.stateless_scope import get_stateless_scope -from keras_core.backend.common.stateless_scope import in_stateless_scope -from keras_core.utils.naming import auto_name +from keras.api_export import keras_export +from keras.backend import config +from keras.backend.common import global_state +from keras.backend.common.name_scope import current_path +from keras.backend.common.stateless_scope import get_stateless_scope +from keras.backend.common.stateless_scope import in_stateless_scope +from keras.utils.naming import auto_name class KerasVariable: @@ -399,7 +399,7 @@ PYTHON_DTYPES_MAP = { } -@keras_core_export("keras_core.backend.standardize_dtype") +@keras_export("keras.backend.standardize_dtype") def standardize_dtype(dtype): if dtype is None: return config.floatx() @@ -456,13 +456,13 @@ def shape_equal(a_shape, b_shape): return True -@keras_core_export("keras_core.backend.is_float_dtype") +@keras_export("keras.backend.is_float_dtype") def is_float_dtype(dtype): dtype = standardize_dtype(dtype) return dtype.startswith("float") or dtype.startswith("bfloat") -@keras_core_export("keras_core.backend.is_int_dtype") +@keras_export("keras.backend.is_int_dtype") def is_int_dtype(dtype): dtype = standardize_dtype(dtype) return dtype.startswith("int") or dtype.startswith("uint") @@ -492,7 +492,7 @@ class AutocastScope: self.original_scope = None def maybe_cast(self, value): - from keras_core import backend + from keras import backend if self.dtype is not None and is_float_dtype(value.dtype): return backend.cast(value, dtype=self.dtype) diff --git a/keras_core/backend/common/variables_test.py b/keras/backend/common/variables_test.py similarity index 92% rename from keras_core/backend/common/variables_test.py rename to keras/backend/common/variables_test.py index eeb7bef7e..86da2140b 100644 --- a/keras_core/backend/common/variables_test.py +++ b/keras/backend/common/variables_test.py @@ -1,11 +1,11 @@ import numpy as np -from keras_core import backend 
-from keras_core import initializers -from keras_core.backend.common.variables import AutocastScope -from keras_core.backend.common.variables import KerasVariable -from keras_core.backend.common.variables import standardize_shape -from keras_core.testing import test_case +from keras import backend +from keras import initializers +from keras.backend.common.variables import AutocastScope +from keras.backend.common.variables import KerasVariable +from keras.backend.common.variables import standardize_shape +from keras.testing import test_case class VariablesTest(test_case.TestCase): @@ -101,7 +101,7 @@ class VariablesTest(test_case.TestCase): self.assertEqual(standardized_shape, (3, 4, 5)) # TODO - # (3.9,torch) FAILED keras_core/backend/common/variables_test.py + # (3.9,torch) FAILED keras/backend/common/variables_test.py # ::VariablesTest::test_standardize_shape_with_non_integer_entry: # - AssertionError "Cannot convert '\(3, 4, 'a'\)' to a shape. # " does not match "invalid literal for int() with base 10: 'a'" diff --git a/keras_core/backend/config.py b/keras/backend/config.py similarity index 80% rename from keras_core/backend/config.py rename to keras/backend/config.py index 7c0d3d408..f77b56f37 100644 --- a/keras_core/backend/config.py +++ b/keras/backend/config.py @@ -1,7 +1,7 @@ import json import os -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export # The type of float to use throughout a session. _FLOATX = "float32" @@ -16,7 +16,7 @@ _IMAGE_DATA_FORMAT = "channels_last" _BACKEND = "tensorflow" -@keras_core_export(["keras_core.config.floatx", "keras_core.backend.floatx"]) +@keras_export(["keras.config.floatx", "keras.backend.floatx"]) def floatx(): """Return the default float type, as a string. 
@@ -27,15 +27,15 @@ def floatx(): Example: - >>> keras_core.config.floatx() + >>> keras.config.floatx() 'float32' """ return _FLOATX -@keras_core_export( - ["keras_core.config.set_floatx", "keras_core.backend.set_floatx"] +@keras_export( + ["keras.config.set_floatx", "keras.backend.set_floatx"] ) def set_floatx(value): """Set the default float dtype. @@ -44,21 +44,21 @@ def set_floatx(value): as this will likely cause numeric stability issues. Instead, mixed precision, which leverages a mix of `float16` and `float32`. It can be configured by calling - `keras_core.mixed_precision.set_dtype_policy('mixed_float16')`. + `keras.mixed_precision.set_dtype_policy('mixed_float16')`. Args: value: String; `'float16'`, `'float32'`, or `'float64'`. Examples: - >>> keras_core.config.floatx() + >>> keras.config.floatx() 'float32' - >>> keras_core.config.set_floatx('float64') - >>> keras_core.config.floatx() + >>> keras.config.set_floatx('float64') + >>> keras.config.floatx() 'float64' >>> # Set it back to float32 - >>> keras_core.config.set_floatx('float32') + >>> keras.config.set_floatx('float32') Raises: ValueError: In case of invalid value. @@ -73,7 +73,7 @@ def set_floatx(value): _FLOATX = str(value) -@keras_core_export(["keras_core.config.epsilon", "keras_core.backend.epsilon"]) +@keras_export(["keras.config.epsilon", "keras.backend.epsilon"]) def epsilon(): """Return the value of the fuzz factor used in numeric expressions. @@ -82,15 +82,15 @@ def epsilon(): Example: - >>> keras_core.config.epsilon() + >>> keras.config.epsilon() 1e-07 """ return _EPSILON -@keras_core_export( - ["keras_core.config.set_epsilon", "keras_core.backend.set_epsilon"] +@keras_export( + ["keras.config.set_epsilon", "keras.backend.set_epsilon"] ) def set_epsilon(value): """Set the value of the fuzz factor used in numeric expressions. @@ -99,25 +99,25 @@ def set_epsilon(value): value: float. New value of epsilon. 
Examples: - >>> keras_core.config.epsilon() + >>> keras.config.epsilon() 1e-07 - >>> keras_core.config.set_epsilon(1e-5) - >>> keras_core.config.epsilon() + >>> keras.config.set_epsilon(1e-5) + >>> keras.config.epsilon() 1e-05 >>> # Set it back to the default value. - >>> keras_core.config.set_epsilon(1e-7) + >>> keras.config.set_epsilon(1e-7) """ global _EPSILON _EPSILON = value -@keras_core_export( +@keras_export( [ - "keras_core.config.image_data_format", - "keras_core.backend.image_data_format", + "keras.config.image_data_format", + "keras.backend.image_data_format", ] ) def image_data_format(): @@ -128,17 +128,17 @@ def image_data_format(): Example: - >>> keras_core.config.image_data_format() + >>> keras.config.image_data_format() 'channels_last' """ return _IMAGE_DATA_FORMAT -@keras_core_export( +@keras_export( [ - "keras_core.config.set_image_data_format", - "keras_core.backend.set_image_data_format", + "keras.config.set_image_data_format", + "keras.backend.set_image_data_format", ] ) def set_image_data_format(data_format): @@ -149,15 +149,15 @@ def set_image_data_format(data_format): Examples: - >>> keras_core.config.image_data_format() + >>> keras.config.image_data_format() 'channels_last' - >>> keras_core.config.set_image_data_format('channels_first') - >>> keras_core.config.image_data_format() + >>> keras.config.set_image_data_format('channels_first') + >>> keras.config.image_data_format() 'channels_first' >>> # Set it back to `'channels_last'` - >>> keras_core.config.set_image_data_format('channels_last') + >>> keras.config.set_image_data_format('channels_last') """ global _IMAGE_DATA_FORMAT @@ -258,10 +258,10 @@ if _BACKEND != "tensorflow": os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" -@keras_core_export( +@keras_export( [ - "keras_core.config.backend", - "keras_core.backend.backend", + "keras.config.backend", + "keras.backend.backend", ] ) def backend(): diff --git a/keras_core/backend/exports.py b/keras/backend/exports.py similarity index 75% 
rename from keras_core/backend/exports.py rename to keras/backend/exports.py index 8aa97ecca..e3e966776 100644 --- a/keras_core/backend/exports.py +++ b/keras/backend/exports.py @@ -1,5 +1,5 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export if backend.backend() == "tensorflow": BackendVariable = backend.tensorflow.core.Variable @@ -11,7 +11,7 @@ elif backend.backend() == "torch": BackendVariable = backend.torch.core.Variable backend_name_scope = backend.common.name_scope.name_scope elif backend.backend() == "numpy": - from keras_core.backend.numpy.core import Variable as NumpyVariable + from keras.backend.numpy.core import Variable as NumpyVariable BackendVariable = NumpyVariable backend_name_scope = backend.common.name_scope.name_scope @@ -19,11 +19,11 @@ else: raise RuntimeError(f"Invalid backend: {backend.backend()}") -@keras_core_export("keras_core.Variable") +@keras_export("keras.Variable") class Variable(BackendVariable): pass -@keras_core_export("keras_core.name_scope") +@keras_export("keras.name_scope") class name_scope(backend_name_scope): pass diff --git a/keras/backend/jax/__init__.py b/keras/backend/jax/__init__.py new file mode 100644 index 000000000..68431f814 --- /dev/null +++ b/keras/backend/jax/__init__.py @@ -0,0 +1,23 @@ +from keras.backend.jax import core +from keras.backend.jax import distribution_lib +from keras.backend.jax import image +from keras.backend.jax import math +from keras.backend.jax import nn +from keras.backend.jax import numpy +from keras.backend.jax import random +from keras.backend.jax.core import SUPPORTS_SPARSE_TENSORS +from keras.backend.jax.core import Variable +from keras.backend.jax.core import cast +from keras.backend.jax.core import compute_output_spec +from keras.backend.jax.core import cond +from keras.backend.jax.core import convert_to_numpy +from keras.backend.jax.core import convert_to_tensor +from 
keras.backend.jax.core import is_tensor +from keras.backend.jax.core import scatter +from keras.backend.jax.core import shape +from keras.backend.jax.core import stop_gradient +from keras.backend.jax.core import vectorized_map +from keras.backend.jax.rnn import cudnn_ok +from keras.backend.jax.rnn import gru +from keras.backend.jax.rnn import lstm +from keras.backend.jax.rnn import rnn diff --git a/keras_core/backend/jax/core.py b/keras/backend/jax/core.py similarity index 95% rename from keras_core/backend/jax/core.py rename to keras/backend/jax/core.py index cb51029ee..8cddbb5bb 100644 --- a/keras_core/backend/jax/core.py +++ b/keras/backend/jax/core.py @@ -6,13 +6,13 @@ import numpy as np import tree from jax.tree_util import Partial -from keras_core.backend.common import KerasVariable -from keras_core.backend.common import global_state -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.common.stateless_scope import StatelessScope -from keras_core.backend.jax import distribution_lib -from keras_core.utils.nest import pack_sequence_as +from keras.backend.common import KerasVariable +from keras.backend.common import global_state +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.common.stateless_scope import StatelessScope +from keras.backend.jax import distribution_lib +from keras.utils.nest import pack_sequence_as SUPPORTS_SPARSE_TENSORS = False @@ -22,7 +22,7 @@ class Variable(KerasVariable): value = jnp.array(value, dtype=self._dtype) # Note that variable.shape is needed by distribution_lib self._shape = tuple(value.shape) - # We can't import the keras_core/distribution/distribution_lib + # We can't import the keras/distribution/distribution_lib # due to circular dependency. 
distribution = global_state.get_global_attribute("distribution") if distribution is not None: diff --git a/keras_core/backend/jax/distribution_lib.py b/keras/backend/jax/distribution_lib.py similarity index 100% rename from keras_core/backend/jax/distribution_lib.py rename to keras/backend/jax/distribution_lib.py diff --git a/keras_core/backend/jax/image.py b/keras/backend/jax/image.py similarity index 98% rename from keras_core/backend/jax/image.py rename to keras/backend/jax/image.py index 57e8ae9cb..84abdfa29 100644 --- a/keras_core/backend/jax/image.py +++ b/keras/backend/jax/image.py @@ -3,7 +3,7 @@ import functools import jax import jax.numpy as jnp -from keras_core.backend.jax.core import convert_to_tensor +from keras.backend.jax.core import convert_to_tensor RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras_core/backend/jax/layer.py b/keras/backend/jax/layer.py similarity index 100% rename from keras_core/backend/jax/layer.py rename to keras/backend/jax/layer.py diff --git a/keras_core/backend/jax/math.py b/keras/backend/jax/math.py similarity index 98% rename from keras_core/backend/jax/math.py rename to keras/backend/jax/math.py index d0dd012ec..8aa7ad1ce 100644 --- a/keras_core/backend/jax/math.py +++ b/keras/backend/jax/math.py @@ -3,9 +3,9 @@ import math import jax import jax.numpy as jnp -from keras_core.backend import standardize_dtype -from keras_core.backend.jax.core import convert_to_tensor -from keras_core.utils.module_utils import scipy +from keras.backend import standardize_dtype +from keras.backend.jax.core import convert_to_tensor +from keras.utils.module_utils import scipy def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras_core/backend/jax/nn.py b/keras/backend/jax/nn.py similarity index 97% rename from keras_core/backend/jax/nn.py rename to keras/backend/jax/nn.py index 7fff15ada..389e56b57 100644 --- a/keras_core/backend/jax/nn.py +++ b/keras/backend/jax/nn.py @@ -4,14 +4,14 @@ import numpy as np 
from jax import lax from jax import nn as jnn -from keras_core.backend import standardize_data_format -from keras_core.backend import standardize_dtype -from keras_core.backend.common.backend_utils import ( +from keras.backend import standardize_data_format +from keras.backend import standardize_dtype +from keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras_core.backend.config import epsilon -from keras_core.backend.jax.core import cast -from keras_core.backend.jax.core import convert_to_tensor +from keras.backend.config import epsilon +from keras.backend.jax.core import cast +from keras.backend.jax.core import convert_to_tensor def relu(x): diff --git a/keras_core/backend/jax/numpy.py b/keras/backend/jax/numpy.py similarity index 98% rename from keras_core/backend/jax/numpy.py rename to keras/backend/jax/numpy.py index 6471075e0..75741a70c 100644 --- a/keras_core/backend/jax/numpy.py +++ b/keras/backend/jax/numpy.py @@ -1,8 +1,8 @@ import jax.numpy as jnp -from keras_core.backend import config -from keras_core.backend.jax.core import cast -from keras_core.backend.jax.core import convert_to_tensor +from keras.backend import config +from keras.backend.jax.core import cast +from keras.backend.jax.core import convert_to_tensor def add(x1, x2): diff --git a/keras_core/backend/jax/random.py b/keras/backend/jax/random.py similarity index 91% rename from keras_core/backend/jax/random.py rename to keras/backend/jax/random.py index 49c7d55a5..6410b2345 100644 --- a/keras_core/backend/jax/random.py +++ b/keras/backend/jax/random.py @@ -1,9 +1,9 @@ import jax -from keras_core.backend.config import floatx -from keras_core.random.seed_generator import SeedGenerator -from keras_core.random.seed_generator import draw_seed -from keras_core.random.seed_generator import make_default_seed +from keras.backend.config import floatx +from keras.random.seed_generator import SeedGenerator +from keras.random.seed_generator import draw_seed 
+from keras.random.seed_generator import make_default_seed def jax_draw_seed(seed): diff --git a/keras_core/backend/jax/rnn.py b/keras/backend/jax/rnn.py similarity index 98% rename from keras_core/backend/jax/rnn.py rename to keras/backend/jax/rnn.py index 9b1e0b49f..f7c49b088 100644 --- a/keras_core/backend/jax/rnn.py +++ b/keras/backend/jax/rnn.py @@ -4,8 +4,8 @@ import tree from jax import lax from jax import numpy as jnp -from keras_core.backend.common import stateless_scope -from keras_core.utils.nest import pack_sequence_as +from keras.backend.common import stateless_scope +from keras.utils.nest import pack_sequence_as def rnn( diff --git a/keras_core/backend/jax/trainer.py b/keras/backend/jax/trainer.py similarity index 98% rename from keras_core/backend/jax/trainer.py rename to keras/backend/jax/trainer.py index 4ea4a7a3c..dd4ecdafd 100644 --- a/keras_core/backend/jax/trainer.py +++ b/keras/backend/jax/trainer.py @@ -4,16 +4,16 @@ import jax import numpy as np import tree -from keras_core import backend -from keras_core import callbacks as callbacks_module -from keras_core import ops -from keras_core import optimizers as optimizers_module -from keras_core.backend import distribution_lib as jax_distribution_lib -from keras_core.distribution import distribution_lib -from keras_core.trainers import trainer as base_trainer -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.epoch_iterator import EpochIterator -from keras_core.utils import traceback_utils +from keras import backend +from keras import callbacks as callbacks_module +from keras import ops +from keras import optimizers as optimizers_module +from keras.backend import distribution_lib as jax_distribution_lib +from keras.distribution import distribution_lib +from keras.trainers import trainer as base_trainer +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.epoch_iterator import EpochIterator +from keras.utils import 
traceback_utils class JAXTrainer(base_trainer.Trainer): diff --git a/keras/backend/numpy/__init__.py b/keras/backend/numpy/__init__.py new file mode 100644 index 000000000..ce0d20e5f --- /dev/null +++ b/keras/backend/numpy/__init__.py @@ -0,0 +1,20 @@ +from keras.backend.numpy import core +from keras.backend.numpy import image +from keras.backend.numpy import math +from keras.backend.numpy import nn +from keras.backend.numpy import numpy +from keras.backend.numpy import random +from keras.backend.numpy.core import SUPPORTS_SPARSE_TENSORS +from keras.backend.numpy.core import Variable +from keras.backend.numpy.core import cast +from keras.backend.numpy.core import compute_output_spec +from keras.backend.numpy.core import cond +from keras.backend.numpy.core import convert_to_numpy +from keras.backend.numpy.core import convert_to_tensor +from keras.backend.numpy.core import is_tensor +from keras.backend.numpy.core import shape +from keras.backend.numpy.core import vectorized_map +from keras.backend.numpy.rnn import cudnn_ok +from keras.backend.numpy.rnn import gru +from keras.backend.numpy.rnn import lstm +from keras.backend.numpy.rnn import rnn diff --git a/keras_core/backend/numpy/core.py b/keras/backend/numpy/core.py similarity index 95% rename from keras_core/backend/numpy/core.py rename to keras/backend/numpy/core.py index 0b84599b1..1b3e8e408 100644 --- a/keras_core/backend/numpy/core.py +++ b/keras/backend/numpy/core.py @@ -1,11 +1,11 @@ import numpy as np import tree -from keras_core.backend.common import KerasVariable -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.common.stateless_scope import StatelessScope -from keras_core.utils.nest import pack_sequence_as +from keras.backend.common import KerasVariable +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.common.stateless_scope 
import StatelessScope +from keras.utils.nest import pack_sequence_as SUPPORTS_SPARSE_TENSORS = False diff --git a/keras_core/backend/numpy/image.py b/keras/backend/numpy/image.py similarity index 98% rename from keras_core/backend/numpy/image.py rename to keras/backend/numpy/image.py index a879b3bda..b3b7dbddf 100644 --- a/keras_core/backend/numpy/image.py +++ b/keras/backend/numpy/image.py @@ -1,8 +1,8 @@ import jax import numpy as np -from keras_core.backend.numpy.core import convert_to_tensor -from keras_core.utils.module_utils import scipy +from keras.backend.numpy.core import convert_to_tensor +from keras.utils.module_utils import scipy RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras_core/backend/numpy/layer.py b/keras/backend/numpy/layer.py similarity index 100% rename from keras_core/backend/numpy/layer.py rename to keras/backend/numpy/layer.py diff --git a/keras_core/backend/numpy/math.py b/keras/backend/numpy/math.py similarity index 97% rename from keras_core/backend/numpy/math.py rename to keras/backend/numpy/math.py index 2e1c2cfcd..84c0bcc30 100644 --- a/keras_core/backend/numpy/math.py +++ b/keras/backend/numpy/math.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core.backend import standardize_dtype -from keras_core.backend.jax.math import fft as jax_fft -from keras_core.backend.jax.math import fft2 as jax_fft2 -from keras_core.backend.numpy.core import convert_to_tensor -from keras_core.utils.module_utils import scipy +from keras.backend import standardize_dtype +from keras.backend.jax.math import fft as jax_fft +from keras.backend.jax.math import fft2 as jax_fft2 +from keras.backend.numpy.core import convert_to_tensor +from keras.utils.module_utils import scipy def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras_core/backend/numpy/nn.py b/keras/backend/numpy/nn.py similarity index 97% rename from keras_core/backend/numpy/nn.py rename to keras/backend/numpy/nn.py index 231575874..d3b171150 100644 --- 
a/keras_core/backend/numpy/nn.py +++ b/keras/backend/numpy/nn.py @@ -3,15 +3,15 @@ import numpy as np from jax import lax from jax import numpy as jnp -from keras_core.backend import standardize_data_format -from keras_core.backend import standardize_dtype -from keras_core.backend.common.backend_utils import ( +from keras.backend import standardize_data_format +from keras.backend import standardize_dtype +from keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras_core.backend.config import epsilon -from keras_core.backend.numpy.core import cast -from keras_core.backend.numpy.core import is_tensor -from keras_core.utils.module_utils import scipy +from keras.backend.config import epsilon +from keras.backend.numpy.core import cast +from keras.backend.numpy.core import is_tensor +from keras.utils.module_utils import scipy def relu(x): diff --git a/keras_core/backend/numpy/numpy.py b/keras/backend/numpy/numpy.py similarity index 99% rename from keras_core/backend/numpy/numpy.py rename to keras/backend/numpy/numpy.py index d2ee78aba..32ae80f01 100644 --- a/keras_core/backend/numpy/numpy.py +++ b/keras/backend/numpy/numpy.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core.backend import config -from keras_core.backend import standardize_dtype +from keras.backend import config +from keras.backend import standardize_dtype def add(x1, x2): diff --git a/keras_core/backend/numpy/random.py b/keras/backend/numpy/random.py similarity index 91% rename from keras_core/backend/numpy/random.py rename to keras/backend/numpy/random.py index f0d6a859b..8b5ddb7a2 100644 --- a/keras_core/backend/numpy/random.py +++ b/keras/backend/numpy/random.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core.backend.config import floatx -from keras_core.backend.numpy.nn import softmax -from keras_core.random.seed_generator import SeedGenerator -from keras_core.random.seed_generator import draw_seed -from keras_core.random.seed_generator import 
make_default_seed +from keras.backend.config import floatx +from keras.backend.numpy.nn import softmax +from keras.random.seed_generator import SeedGenerator +from keras.random.seed_generator import draw_seed +from keras.random.seed_generator import make_default_seed def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): diff --git a/keras_core/backend/numpy/rnn.py b/keras/backend/numpy/rnn.py similarity index 99% rename from keras_core/backend/numpy/rnn.py rename to keras/backend/numpy/rnn.py index 0b4a08c84..027eb0df7 100644 --- a/keras_core/backend/numpy/rnn.py +++ b/keras/backend/numpy/rnn.py @@ -1,7 +1,7 @@ import numpy as np import tree -from keras_core.utils.nest import pack_sequence_as +from keras.utils.nest import pack_sequence_as def rnn( diff --git a/keras_core/backend/numpy/trainer.py b/keras/backend/numpy/trainer.py similarity index 95% rename from keras_core/backend/numpy/trainer.py rename to keras/backend/numpy/trainer.py index 70f3b2349..e9e9acc07 100644 --- a/keras_core/backend/numpy/trainer.py +++ b/keras/backend/numpy/trainer.py @@ -1,15 +1,15 @@ import numpy as np import tree -from keras_core import backend -from keras_core import callbacks as callbacks_module -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.numpy.core import is_tensor -from keras_core.trainers import trainer as base_trainer -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.epoch_iterator import EpochIterator -from keras_core.utils import traceback_utils +from keras import backend +from keras import callbacks as callbacks_module +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.numpy.core import is_tensor +from keras.trainers import trainer as base_trainer +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.epoch_iterator 
import EpochIterator +from keras.utils import traceback_utils class NumpyTrainer(base_trainer.Trainer): diff --git a/keras/backend/tensorflow/__init__.py b/keras/backend/tensorflow/__init__.py new file mode 100644 index 000000000..6a941b685 --- /dev/null +++ b/keras/backend/tensorflow/__init__.py @@ -0,0 +1,24 @@ +from keras.backend.tensorflow import core +from keras.backend.tensorflow import image +from keras.backend.tensorflow import math +from keras.backend.tensorflow import nn +from keras.backend.tensorflow import numpy +from keras.backend.tensorflow import random +from keras.backend.tensorflow import tensorboard +from keras.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS +from keras.backend.tensorflow.core import Variable +from keras.backend.tensorflow.core import cast +from keras.backend.tensorflow.core import compute_output_spec +from keras.backend.tensorflow.core import cond +from keras.backend.tensorflow.core import convert_to_numpy +from keras.backend.tensorflow.core import convert_to_tensor +from keras.backend.tensorflow.core import is_tensor +from keras.backend.tensorflow.core import name_scope +from keras.backend.tensorflow.core import scatter +from keras.backend.tensorflow.core import shape +from keras.backend.tensorflow.core import stop_gradient +from keras.backend.tensorflow.core import vectorized_map +from keras.backend.tensorflow.rnn import cudnn_ok +from keras.backend.tensorflow.rnn import gru +from keras.backend.tensorflow.rnn import lstm +from keras.backend.tensorflow.rnn import rnn diff --git a/keras_core/backend/tensorflow/core.py b/keras/backend/tensorflow/core.py similarity index 94% rename from keras_core/backend/tensorflow/core.py rename to keras/backend/tensorflow/core.py index 15e79a9e6..cbf2030b8 100644 --- a/keras_core/backend/tensorflow/core.py +++ b/keras/backend/tensorflow/core.py @@ -4,13 +4,13 @@ import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice -from 
keras_core.backend.common import KerasVariable -from keras_core.backend.common import global_state -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.common.name_scope import name_scope as base_name_scope -from keras_core.backend.common.stateless_scope import StatelessScope -from keras_core.utils.naming import auto_name +from keras.backend.common import KerasVariable +from keras.backend.common import global_state +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.common.name_scope import name_scope as base_name_scope +from keras.backend.common.stateless_scope import StatelessScope +from keras.utils.naming import auto_name SUPPORTS_SPARSE_TENSORS = True diff --git a/keras_core/backend/tensorflow/distribute_test.py b/keras/backend/tensorflow/distribute_test.py similarity index 95% rename from keras_core/backend/tensorflow/distribute_test.py rename to keras/backend/tensorflow/distribute_test.py index 38eff4fba..413bac6f0 100644 --- a/keras_core/backend/tensorflow/distribute_test.py +++ b/keras/backend/tensorflow/distribute_test.py @@ -5,11 +5,11 @@ import pytest import tensorflow as tf from tensorflow.python.eager import context -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.backend.tensorflow import trainer as tf_trainer +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.backend.tensorflow import trainer as tf_trainer @pytest.mark.skipif( diff --git a/keras_core/backend/tensorflow/image.py b/keras/backend/tensorflow/image.py similarity index 99% rename from keras_core/backend/tensorflow/image.py rename to keras/backend/tensorflow/image.py index b24528049..3fa90521e 100644 --- a/keras_core/backend/tensorflow/image.py +++ 
b/keras/backend/tensorflow/image.py @@ -4,7 +4,7 @@ import operator import tensorflow as tf -from keras_core.backend.tensorflow.core import convert_to_tensor +from keras.backend.tensorflow.core import convert_to_tensor RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras_core/backend/tensorflow/layer.py b/keras/backend/tensorflow/layer.py similarity index 94% rename from keras_core/backend/tensorflow/layer.py rename to keras/backend/tensorflow/layer.py index 4301ab13d..3c50f0d41 100644 --- a/keras_core/backend/tensorflow/layer.py +++ b/keras/backend/tensorflow/layer.py @@ -1,6 +1,6 @@ import tensorflow as tf -from keras_core.utils import tf_utils +from keras.utils import tf_utils class TFLayer(tf.__internal__.tracking.AutoTrackable): @@ -64,9 +64,9 @@ class TFLayer(tf.__internal__.tracking.AutoTrackable): def _default_save_signature(self): """For SavedModel support: returns the default serving signature.""" - from keras_core.models.functional import Functional - from keras_core.models.model import Model - from keras_core.models.sequential import Sequential + from keras.models.functional import Functional + from keras.models.model import Model + from keras.models.sequential import Sequential if not isinstance(self, Model): return None diff --git a/keras_core/backend/tensorflow/math.py b/keras/backend/tensorflow/math.py similarity index 98% rename from keras_core/backend/tensorflow/math.py rename to keras/backend/tensorflow/math.py index d7139fe55..89d6bb5d3 100644 --- a/keras_core/backend/tensorflow/math.py +++ b/keras/backend/tensorflow/math.py @@ -1,7 +1,7 @@ import tensorflow as tf -from keras_core.backend import standardize_dtype -from keras_core.backend.tensorflow.core import convert_to_tensor +from keras.backend import standardize_dtype +from keras.backend.tensorflow.core import convert_to_tensor def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras_core/backend/tensorflow/name_scope_test.py 
b/keras/backend/tensorflow/name_scope_test.py similarity index 93% rename from keras_core/backend/tensorflow/name_scope_test.py rename to keras/backend/tensorflow/name_scope_test.py index 1bceff4d0..8ec1aecb7 100644 --- a/keras_core/backend/tensorflow/name_scope_test.py +++ b/keras/backend/tensorflow/name_scope_test.py @@ -1,7 +1,7 @@ import tensorflow as tf -from keras_core.backend.tensorflow.core import name_scope -from keras_core.testing import TestCase +from keras.backend.tensorflow.core import name_scope +from keras.testing import TestCase class TFNameScopeTest(TestCase): diff --git a/keras_core/backend/tensorflow/nn.py b/keras/backend/tensorflow/nn.py similarity index 98% rename from keras_core/backend/tensorflow/nn.py rename to keras/backend/tensorflow/nn.py index e3eb8f6ea..756ca8bbe 100644 --- a/keras_core/backend/tensorflow/nn.py +++ b/keras/backend/tensorflow/nn.py @@ -2,13 +2,13 @@ import warnings import tensorflow as tf -from keras_core.backend import standardize_data_format -from keras_core.backend import standardize_dtype -from keras_core.backend.common.backend_utils import ( +from keras.backend import standardize_data_format +from keras.backend import standardize_dtype +from keras.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras_core.backend.config import epsilon -from keras_core.backend.tensorflow.core import cast +from keras.backend.config import epsilon +from keras.backend.tensorflow.core import cast def relu(x): diff --git a/keras_core/backend/tensorflow/numpy.py b/keras/backend/tensorflow/numpy.py similarity index 99% rename from keras_core/backend/tensorflow/numpy.py rename to keras/backend/tensorflow/numpy.py index 4b7972250..e493f1503 100644 --- a/keras_core/backend/tensorflow/numpy.py +++ b/keras/backend/tensorflow/numpy.py @@ -7,8 +7,8 @@ import tensorflow as tf from tensorflow.experimental import numpy as tfnp from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops -from 
keras_core.backend import config -from keras_core.backend.tensorflow.core import convert_to_tensor +from keras.backend import config +from keras.backend.tensorflow.core import convert_to_tensor def add(x1, x2): diff --git a/keras_core/backend/tensorflow/optimizer.py b/keras/backend/tensorflow/optimizer.py similarity index 98% rename from keras_core/backend/tensorflow/optimizer.py rename to keras/backend/tensorflow/optimizer.py index e736ef5fa..fbecf1a61 100644 --- a/keras_core/backend/tensorflow/optimizer.py +++ b/keras/backend/tensorflow/optimizer.py @@ -1,7 +1,7 @@ import tensorflow as tf -from keras_core import backend -from keras_core.optimizers import base_optimizer +from keras import backend +from keras.optimizers import base_optimizer class TFOptimizer(base_optimizer.BaseOptimizer): diff --git a/keras_core/backend/tensorflow/optimizer_distribute_test.py b/keras/backend/tensorflow/optimizer_distribute_test.py similarity index 97% rename from keras_core/backend/tensorflow/optimizer_distribute_test.py rename to keras/backend/tensorflow/optimizer_distribute_test.py index 5887bfbdd..31f4cb58c 100644 --- a/keras_core/backend/tensorflow/optimizer_distribute_test.py +++ b/keras/backend/tensorflow/optimizer_distribute_test.py @@ -5,9 +5,9 @@ import pytest import tensorflow as tf from tensorflow.python.eager import context -from keras_core import backend -from keras_core import testing -from keras_core.optimizers.sgd import SGD +from keras import backend +from keras import testing +from keras.optimizers.sgd import SGD @pytest.mark.skipif( diff --git a/keras_core/backend/tensorflow/random.py b/keras/backend/tensorflow/random.py similarity index 90% rename from keras_core/backend/tensorflow/random.py rename to keras/backend/tensorflow/random.py index c47b452e2..17e030ba2 100644 --- a/keras_core/backend/tensorflow/random.py +++ b/keras/backend/tensorflow/random.py @@ -1,11 +1,11 @@ import tensorflow as tf from tensorflow.experimental import numpy as tfnp -from 
keras_core.backend.common import standardize_dtype -from keras_core.backend.config import floatx -from keras_core.random.seed_generator import SeedGenerator -from keras_core.random.seed_generator import draw_seed -from keras_core.random.seed_generator import make_default_seed +from keras.backend.common import standardize_dtype +from keras.backend.config import floatx +from keras.random.seed_generator import SeedGenerator +from keras.random.seed_generator import draw_seed +from keras.random.seed_generator import make_default_seed def tf_draw_seed(seed): diff --git a/keras_core/backend/tensorflow/rnn.py b/keras/backend/tensorflow/rnn.py similarity index 99% rename from keras_core/backend/tensorflow/rnn.py rename to keras/backend/tensorflow/rnn.py index fa1f7c3c7..3430b1bde 100644 --- a/keras_core/backend/tensorflow/rnn.py +++ b/keras/backend/tensorflow/rnn.py @@ -1,7 +1,7 @@ import tensorflow as tf import tree -from keras_core.utils.nest import pack_sequence_as +from keras.utils.nest import pack_sequence_as def rnn( @@ -467,7 +467,7 @@ def gru( if not cudnn_supported or not inputs_supported: raise NotImplementedError - from keras_core.backend.tensorflow import Variable + from keras.backend.tensorflow import Variable if isinstance(kernel, Variable): kernel = kernel.value @@ -503,8 +503,8 @@ def _do_gru_arguments_support_cudnn( use_bias, reset_after, ): - from keras_core import activations - from keras_core import ops + from keras import activations + from keras import ops return ( activation in (activations.tanh, tf.tanh, ops.tanh) @@ -522,8 +522,8 @@ def _do_lstm_arguments_support_cudnn( unroll, use_bias, ): - from keras_core import activations - from keras_core import ops + from keras import activations + from keras import ops return ( activation in (activations.tanh, tf.tanh, ops.tanh) @@ -823,7 +823,7 @@ def lstm( if not cudnn_supported or not inputs_supported: raise NotImplementedError - from keras_core.backend.tensorflow import Variable + from 
keras.backend.tensorflow import Variable if isinstance(kernel, Variable): kernel = kernel.value diff --git a/keras_core/backend/tensorflow/saved_model_test.py b/keras/backend/tensorflow/saved_model_test.py similarity index 89% rename from keras_core/backend/tensorflow/saved_model_test.py rename to keras/backend/tensorflow/saved_model_test.py index 228b7c3c3..bc7368531 100644 --- a/keras_core/backend/tensorflow/saved_model_test.py +++ b/keras/backend/tensorflow/saved_model_test.py @@ -6,12 +6,12 @@ import numpy as np import pytest import tensorflow as tf -from keras_core import backend -from keras_core import layers -from keras_core import metrics -from keras_core import models -from keras_core import testing -from keras_core.saving import object_registration +from keras import backend +from keras import layers +from keras import metrics +from keras import models +from keras import testing +from keras.saving import object_registration @object_registration.register_keras_serializable(package="my_package") @@ -40,7 +40,7 @@ class SavedModelTest(testing.TestCase): X_train = np.random.rand(100, 3) y_train = np.random.rand(100, 1) model.fit(X_train, y_train) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) self.assertAllClose( @@ -64,7 +64,7 @@ class SavedModelTest(testing.TestCase): X_train = np.random.rand(100, 3) y_train = np.random.rand(100, 1) model.fit(X_train, y_train) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) self.assertAllClose( @@ -86,7 +86,7 @@ class SavedModelTest(testing.TestCase): X_train = np.random.rand(100, 3) y_train = np.random.rand(100, 1) model.fit(X_train, y_train) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = 
os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) self.assertAllClose( @@ -117,7 +117,7 @@ class SavedModelTest(testing.TestCase): model = Model() inp = np.array([[1.0]]) result = model(inp) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) self.assertAllClose( @@ -131,7 +131,7 @@ class SavedModelTest(testing.TestCase): input_1 = layers.Input(shape=(3,)) input_2 = layers.Input(shape=(5,)) model = models.Model([input_1, input_2], [input_1, input_2]) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) @@ -173,7 +173,7 @@ class SavedModelTest(testing.TestCase): tf.constant(i, shape=[1, 1], dtype=tf.float32) for i in range(1, 4) ] expected = model(*inp) - path = os.path.join(self.get_temp_dir(), "my_keras_core_model") + path = os.path.join(self.get_temp_dir(), "my_keras_model") tf.saved_model.save(model, path) restored_model = tf.saved_model.load(path) output = restored_model.call(*inp) diff --git a/keras_core/backend/tensorflow/tensorboard.py b/keras/backend/tensorflow/tensorboard.py similarity index 100% rename from keras_core/backend/tensorflow/tensorboard.py rename to keras/backend/tensorflow/tensorboard.py diff --git a/keras_core/backend/tensorflow/trainer.py b/keras/backend/tensorflow/trainer.py similarity index 98% rename from keras_core/backend/tensorflow/trainer.py rename to keras/backend/tensorflow/trainer.py index cefee9027..273d47d72 100644 --- a/keras_core/backend/tensorflow/trainer.py +++ b/keras/backend/tensorflow/trainer.py @@ -7,13 +7,13 @@ import tree from packaging.version import Version from tensorflow.python.eager import context as tf_context -from keras_core import 
callbacks as callbacks_module -from keras_core import metrics as metrics_module -from keras_core import optimizers as optimizers_module -from keras_core.trainers import trainer as base_trainer -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.epoch_iterator import EpochIterator -from keras_core.utils import traceback_utils +from keras import callbacks as callbacks_module +from keras import metrics as metrics_module +from keras import optimizers as optimizers_module +from keras.trainers import trainer as base_trainer +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.epoch_iterator import EpochIterator +from keras.utils import traceback_utils class TensorFlowTrainer(base_trainer.Trainer): diff --git a/keras_core/backend/tests/compute_output_spec_test.py b/keras/backend/tests/compute_output_spec_test.py similarity index 93% rename from keras_core/backend/tests/compute_output_spec_test.py rename to keras/backend/tests/compute_output_spec_test.py index d55364992..dd9737519 100644 --- a/keras_core/backend/tests/compute_output_spec_test.py +++ b/keras/backend/tests/compute_output_spec_test.py @@ -1,7 +1,7 @@ import unittest -from keras_core import backend -from keras_core.backend.common.keras_tensor import KerasTensor +from keras import backend +from keras.backend.common.keras_tensor import KerasTensor def single_arg_test_fn(x): diff --git a/keras/backend/torch/__init__.py b/keras/backend/torch/__init__.py new file mode 100644 index 000000000..57b77c404 --- /dev/null +++ b/keras/backend/torch/__init__.py @@ -0,0 +1,39 @@ +"""Torch backend APIs. + +# Note on device placement + +Torch has a different device placement style compared to TF and JAX. +In short, variables/tensors are not created on GPU by default, +and the GPU cannot directly communicate with the CPU. 
+To bring Torch behavior in line with TF and JAX automated device placement, +we are doing the following to automate device placement if a GPU is available: + +- Variables are created on GPU. +- Input data will be placed on GPU at the first `keras.layers.Layer` call. +- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU. +- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy. +""" + +from keras.backend.torch import core +from keras.backend.torch import image +from keras.backend.torch import math +from keras.backend.torch import nn +from keras.backend.torch import numpy +from keras.backend.torch import random +from keras.backend.torch.core import SUPPORTS_SPARSE_TENSORS +from keras.backend.torch.core import Variable +from keras.backend.torch.core import cast +from keras.backend.torch.core import compute_output_spec +from keras.backend.torch.core import cond +from keras.backend.torch.core import convert_to_numpy +from keras.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import is_tensor +from keras.backend.torch.core import scatter +from keras.backend.torch.core import shape +from keras.backend.torch.core import stop_gradient +from keras.backend.torch.core import to_torch_dtype +from keras.backend.torch.core import vectorized_map +from keras.backend.torch.rnn import cudnn_ok +from keras.backend.torch.rnn import gru +from keras.backend.torch.rnn import lstm +from keras.backend.torch.rnn import rnn diff --git a/keras_core/backend/torch/core.py b/keras/backend/torch/core.py similarity index 97% rename from keras_core/backend/torch/core.py rename to keras/backend/torch/core.py index f9675a301..47bf06748 100644 --- a/keras_core/backend/torch/core.py +++ b/keras/backend/torch/core.py @@ -5,12 +5,12 @@ import numpy as np import torch import tree -from keras_core.backend.common import KerasVariable -from keras_core.backend.common import global_state -from keras_core.backend.common import 
standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.common.stateless_scope import StatelessScope -from keras_core.utils.nest import pack_sequence_as +from keras.backend.common import KerasVariable +from keras.backend.common import global_state +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.common.stateless_scope import StatelessScope +from keras.utils.nest import pack_sequence_as SUPPORTS_SPARSE_TENSORS = False diff --git a/keras_core/backend/torch/image.py b/keras/backend/torch/image.py similarity index 99% rename from keras_core/backend/torch/image.py rename to keras/backend/torch/image.py index aef4ea9de..496a8aeec 100644 --- a/keras_core/backend/torch/image.py +++ b/keras/backend/torch/image.py @@ -4,7 +4,7 @@ import operator import torch -from keras_core.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import convert_to_tensor RESIZE_INTERPOLATIONS = {} # populated after torchvision import diff --git a/keras_core/backend/torch/layer.py b/keras/backend/torch/layer.py similarity index 81% rename from keras_core/backend/torch/layer.py rename to keras/backend/torch/layer.py index d5c6d60e8..8eeb4277d 100644 --- a/keras_core/backend/torch/layer.py +++ b/keras/backend/torch/layer.py @@ -1,7 +1,7 @@ import torch -from keras_core.backend.common.stateless_scope import in_stateless_scope -from keras_core.ops.operation import Operation +from keras.backend.common.stateless_scope import in_stateless_scope +from keras.ops.operation import Operation class TorchLayer(torch.nn.Module): @@ -26,14 +26,14 @@ class TorchLayer(torch.nn.Module): return Operation.__call__(self, *args, **kwargs) def _setattr_hook(self, name, value): - from keras_core.layers import Layer + from keras.layers import Layer if ( isinstance(value, torch.nn.Module) and not isinstance(value, Layer) and not name == "torch_params" ): - from 
keras_core.utils.torch_utils import TorchModuleWrapper + from keras.utils.torch_utils import TorchModuleWrapper if not isinstance(self, TorchModuleWrapper): value = TorchModuleWrapper(value) diff --git a/keras_core/backend/torch/math.py b/keras/backend/torch/math.py similarity index 98% rename from keras_core/backend/torch/math.py rename to keras/backend/torch/math.py index eecdc4902..a5b4c38da 100644 --- a/keras_core/backend/torch/math.py +++ b/keras/backend/torch/math.py @@ -2,10 +2,10 @@ import math import torch -from keras_core.backend import standardize_dtype -from keras_core.backend.torch.core import convert_to_tensor -from keras_core.backend.torch.core import get_device -from keras_core.backend.torch.numpy import pad +from keras.backend import standardize_dtype +from keras.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import get_device +from keras.backend.torch.numpy import pad def segment_sum(data, segment_ids, num_segments=None, **kwargs): diff --git a/keras_core/backend/torch/nn.py b/keras/backend/torch/nn.py similarity index 97% rename from keras_core/backend/torch/nn.py rename to keras/backend/torch/nn.py index 295c88914..c04584c48 100644 --- a/keras_core/backend/torch/nn.py +++ b/keras/backend/torch/nn.py @@ -2,19 +2,19 @@ import numpy as np import torch import torch.nn.functional as tnn -from keras_core.backend import standardize_data_format -from keras_core.backend import standardize_dtype -from keras_core.backend.common.backend_utils import ( +from keras.backend import standardize_data_format +from keras.backend import standardize_dtype +from keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_torch, ) -from keras_core.backend.config import epsilon -from keras_core.backend.torch.core import cast -from keras_core.backend.torch.core import convert_to_tensor -from keras_core.backend.torch.core import get_device -from keras_core.backend.torch.numpy import expand_dims -from 
keras_core.backend.torch.numpy import maximum -from keras_core.backend.torch.numpy import where -from keras_core.utils.argument_validation import standardize_tuple +from keras.backend.config import epsilon +from keras.backend.torch.core import cast +from keras.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import get_device +from keras.backend.torch.numpy import expand_dims +from keras.backend.torch.numpy import maximum +from keras.backend.torch.numpy import where +from keras.utils.argument_validation import standardize_tuple def relu(x): diff --git a/keras_core/backend/torch/numpy.py b/keras/backend/torch/numpy.py similarity index 98% rename from keras_core/backend/torch/numpy.py rename to keras/backend/torch/numpy.py index 04dd695aa..cf2322b60 100644 --- a/keras_core/backend/torch/numpy.py +++ b/keras/backend/torch/numpy.py @@ -1,12 +1,12 @@ import numpy as np import torch -from keras_core.backend import config -from keras_core.backend.torch.core import cast -from keras_core.backend.torch.core import convert_to_tensor -from keras_core.backend.torch.core import get_device -from keras_core.backend.torch.core import is_tensor -from keras_core.backend.torch.core import to_torch_dtype +from keras.backend import config +from keras.backend.torch.core import cast +from keras.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import get_device +from keras.backend.torch.core import is_tensor +from keras.backend.torch.core import to_torch_dtype TORCH_INT_TYPES = ( torch.int8, @@ -771,9 +771,9 @@ def repeat(x, repeats, axis=None): if get_device() == "meta": # Import upper level modules locally to avoid circular imports # TODO: Refactor the upper level modules to avoid these imports. 
- from keras_core.backend import KerasTensor - from keras_core.backend import standardize_dtype - from keras_core.ops.numpy import repeat + from keras.backend import KerasTensor + from keras.backend import standardize_dtype + from keras.ops.numpy import repeat x = KerasTensor(x.shape, standardize_dtype(x.dtype)) outputs = repeat(x, repeats, axis=axis) diff --git a/keras/backend/torch/optimizers/__init__.py b/keras/backend/torch/optimizers/__init__.py new file mode 100644 index 000000000..d067b931b --- /dev/null +++ b/keras/backend/torch/optimizers/__init__.py @@ -0,0 +1 @@ +from keras.backend.torch.optimizers.torch_optimizer import TorchOptimizer diff --git a/keras_core/backend/torch/optimizers/torch_adadelta.py b/keras/backend/torch/optimizers/torch_adadelta.py similarity index 92% rename from keras_core/backend/torch/optimizers/torch_adadelta.py rename to keras/backend/torch/optimizers/torch_adadelta.py index a05a6cbf1..c8a3607f9 100644 --- a/keras_core/backend/torch/optimizers/torch_adadelta.py +++ b/keras/backend/torch/optimizers/torch_adadelta.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class Adadelta( diff --git a/keras_core/backend/torch/optimizers/torch_adagrad.py b/keras/backend/torch/optimizers/torch_adagrad.py similarity index 87% rename from keras_core/backend/torch/optimizers/torch_adagrad.py rename to keras/backend/torch/optimizers/torch_adagrad.py index 117a93bc4..2cdaa87c1 100644 --- a/keras_core/backend/torch/optimizers/torch_adagrad.py +++ b/keras/backend/torch/optimizers/torch_adagrad.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers 
+from keras.backend.torch.optimizers import torch_parallel_optimizer class Adagrad( diff --git a/keras_core/backend/torch/optimizers/torch_adam.py b/keras/backend/torch/optimizers/torch_adam.py similarity index 92% rename from keras_core/backend/torch/optimizers/torch_adam.py rename to keras/backend/torch/optimizers/torch_adam.py index 1e1e424d5..7819a0396 100644 --- a/keras_core/backend/torch/optimizers/torch_adam.py +++ b/keras/backend/torch/optimizers/torch_adam.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class Adam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adam): diff --git a/keras_core/backend/torch/optimizers/torch_adamax.py b/keras/backend/torch/optimizers/torch_adamax.py similarity index 90% rename from keras_core/backend/torch/optimizers/torch_adamax.py rename to keras/backend/torch/optimizers/torch_adamax.py index 19614058d..b9463ca07 100644 --- a/keras_core/backend/torch/optimizers/torch_adamax.py +++ b/keras/backend/torch/optimizers/torch_adamax.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class Adamax( diff --git a/keras/backend/torch/optimizers/torch_adamw.py b/keras/backend/torch/optimizers/torch_adamw.py new file mode 100644 index 000000000..c43ed17bc --- /dev/null +++ b/keras/backend/torch/optimizers/torch_adamw.py @@ -0,0 +1,6 @@ +from keras import optimizers +from keras.backend.torch.optimizers import torch_adam + + +class AdamW(torch_adam.Adam, optimizers.AdamW): + pass diff --git a/keras_core/backend/torch/optimizers/torch_lion.py 
b/keras/backend/torch/optimizers/torch_lion.py similarity index 87% rename from keras_core/backend/torch/optimizers/torch_lion.py rename to keras/backend/torch/optimizers/torch_lion.py index e3d0e0e1f..9bb58d6fe 100644 --- a/keras_core/backend/torch/optimizers/torch_lion.py +++ b/keras/backend/torch/optimizers/torch_lion.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion): diff --git a/keras_core/backend/torch/optimizers/torch_nadam.py b/keras/backend/torch/optimizers/torch_nadam.py similarity index 92% rename from keras_core/backend/torch/optimizers/torch_nadam.py rename to keras/backend/torch/optimizers/torch_nadam.py index 8962248a4..08b73bb0c 100644 --- a/keras_core/backend/torch/optimizers/torch_nadam.py +++ b/keras/backend/torch/optimizers/torch_nadam.py @@ -1,9 +1,9 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch import core -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch import core +from keras.backend.torch.optimizers import torch_parallel_optimizer class Nadam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Nadam): diff --git a/keras_core/backend/torch/optimizers/torch_optimizer.py b/keras/backend/torch/optimizers/torch_optimizer.py similarity index 59% rename from keras_core/backend/torch/optimizers/torch_optimizer.py rename to keras/backend/torch/optimizers/torch_optimizer.py index 9914bb3c7..0c9ec3843 100644 --- a/keras_core/backend/torch/optimizers/torch_optimizer.py +++ b/keras/backend/torch/optimizers/torch_optimizer.py @@ -1,21 +1,21 @@ import torch -from 
keras_core import optimizers -from keras_core.optimizers.base_optimizer import BaseOptimizer +from keras import optimizers +from keras.optimizers.base_optimizer import BaseOptimizer class TorchOptimizer(BaseOptimizer): def __new__(cls, *args, **kwargs): # Import locally to avoid circular imports. - from keras_core.backend.torch.optimizers import torch_adadelta - from keras_core.backend.torch.optimizers import torch_adagrad - from keras_core.backend.torch.optimizers import torch_adam - from keras_core.backend.torch.optimizers import torch_adamax - from keras_core.backend.torch.optimizers import torch_adamw - from keras_core.backend.torch.optimizers import torch_lion - from keras_core.backend.torch.optimizers import torch_nadam - from keras_core.backend.torch.optimizers import torch_rmsprop - from keras_core.backend.torch.optimizers import torch_sgd + from keras.backend.torch.optimizers import torch_adadelta + from keras.backend.torch.optimizers import torch_adagrad + from keras.backend.torch.optimizers import torch_adam + from keras.backend.torch.optimizers import torch_adamax + from keras.backend.torch.optimizers import torch_adamw + from keras.backend.torch.optimizers import torch_lion + from keras.backend.torch.optimizers import torch_nadam + from keras.backend.torch.optimizers import torch_rmsprop + from keras.backend.torch.optimizers import torch_sgd OPTIMIZERS = { optimizers.Adadelta: torch_adadelta.Adadelta, diff --git a/keras_core/backend/torch/optimizers/torch_parallel_optimizer.py b/keras/backend/torch/optimizers/torch_parallel_optimizer.py similarity index 85% rename from keras_core/backend/torch/optimizers/torch_parallel_optimizer.py rename to keras/backend/torch/optimizers/torch_parallel_optimizer.py index 2bd21a0d5..d06a86c32 100644 --- a/keras_core/backend/torch/optimizers/torch_parallel_optimizer.py +++ b/keras/backend/torch/optimizers/torch_parallel_optimizer.py @@ -1,4 +1,4 @@ -from keras_core.optimizers.base_optimizer import BaseOptimizer +from 
keras.optimizers.base_optimizer import BaseOptimizer class TorchParallelOptimizer(BaseOptimizer): diff --git a/keras_core/backend/torch/optimizers/torch_rmsprop.py b/keras/backend/torch/optimizers/torch_rmsprop.py similarity index 93% rename from keras_core/backend/torch/optimizers/torch_rmsprop.py rename to keras/backend/torch/optimizers/torch_rmsprop.py index 5b6afb41f..100c72c25 100644 --- a/keras_core/backend/torch/optimizers/torch_rmsprop.py +++ b/keras/backend/torch/optimizers/torch_rmsprop.py @@ -1,8 +1,8 @@ import torch -from keras_core import ops -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import ops +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class RMSprop( diff --git a/keras_core/backend/torch/optimizers/torch_sgd.py b/keras/backend/torch/optimizers/torch_sgd.py similarity index 90% rename from keras_core/backend/torch/optimizers/torch_sgd.py rename to keras/backend/torch/optimizers/torch_sgd.py index 08ba08e17..726f10217 100644 --- a/keras_core/backend/torch/optimizers/torch_sgd.py +++ b/keras/backend/torch/optimizers/torch_sgd.py @@ -1,7 +1,7 @@ import torch -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_parallel_optimizer +from keras import optimizers +from keras.backend.torch.optimizers import torch_parallel_optimizer class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD): diff --git a/keras_core/backend/torch/random.py b/keras/backend/torch/random.py similarity index 93% rename from keras_core/backend/torch/random.py rename to keras/backend/torch/random.py index 1035ae12e..b4365c02d 100644 --- a/keras_core/backend/torch/random.py +++ b/keras/backend/torch/random.py @@ -1,13 +1,13 @@ import torch import torch.nn.functional as tnn -from keras_core.backend.config import floatx -from keras_core.backend.torch.core import convert_to_tensor -from 
keras_core.backend.torch.core import get_device -from keras_core.backend.torch.core import to_torch_dtype -from keras_core.random.seed_generator import SeedGenerator -from keras_core.random.seed_generator import draw_seed -from keras_core.random.seed_generator import make_default_seed +from keras.backend.config import floatx +from keras.backend.torch.core import convert_to_tensor +from keras.backend.torch.core import get_device +from keras.backend.torch.core import to_torch_dtype +from keras.random.seed_generator import SeedGenerator +from keras.random.seed_generator import draw_seed +from keras.random.seed_generator import make_default_seed def torch_seed_generator(seed): diff --git a/keras_core/backend/torch/rnn.py b/keras/backend/torch/rnn.py similarity index 99% rename from keras_core/backend/torch/rnn.py rename to keras/backend/torch/rnn.py index a2cdc4b72..3bdca9de0 100644 --- a/keras_core/backend/torch/rnn.py +++ b/keras/backend/torch/rnn.py @@ -1,8 +1,8 @@ import torch import tree -from keras_core.backend.torch.core import convert_to_tensor -from keras_core.utils.nest import pack_sequence_as +from keras.backend.torch.core import convert_to_tensor +from keras.utils.nest import pack_sequence_as def rnn( diff --git a/keras_core/backend/torch/trainer.py b/keras/backend/torch/trainer.py similarity index 97% rename from keras_core/backend/torch/trainer.py rename to keras/backend/torch/trainer.py index a8a05e2dd..5eee0ed27 100644 --- a/keras_core/backend/torch/trainer.py +++ b/keras/backend/torch/trainer.py @@ -4,16 +4,16 @@ import numpy as np import torch import tree -from keras_core import backend -from keras_core import callbacks as callbacks_module -from keras_core import optimizers as optimizers_module -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.torch.core import is_tensor -from keras_core.trainers import trainer as base_trainer -from 
keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.epoch_iterator import EpochIterator -from keras_core.utils import traceback_utils +from keras import backend +from keras import callbacks as callbacks_module +from keras import optimizers as optimizers_module +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.backend.torch.core import is_tensor +from keras.trainers import trainer as base_trainer +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.epoch_iterator import EpochIterator +from keras.utils import traceback_utils class TorchTrainer(base_trainer.Trainer): diff --git a/keras/callbacks/__init__.py b/keras/callbacks/__init__.py new file mode 100644 index 000000000..6edc0e30c --- /dev/null +++ b/keras/callbacks/__init__.py @@ -0,0 +1,14 @@ +from keras.callbacks.backup_and_restore_callback import BackupAndRestore +from keras.callbacks.callback import Callback +from keras.callbacks.callback_list import CallbackList +from keras.callbacks.csv_logger import CSVLogger +from keras.callbacks.early_stopping import EarlyStopping +from keras.callbacks.history import History +from keras.callbacks.lambda_callback import LambdaCallback +from keras.callbacks.learning_rate_scheduler import LearningRateScheduler +from keras.callbacks.model_checkpoint import ModelCheckpoint +from keras.callbacks.progbar_logger import ProgbarLogger +from keras.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau +from keras.callbacks.remote_monitor import RemoteMonitor +from keras.callbacks.tensorboard import TensorBoard +from keras.callbacks.terminate_on_nan import TerminateOnNaN diff --git a/keras_core/callbacks/backup_and_restore_callback.py b/keras/callbacks/backup_and_restore_callback.py similarity index 96% rename from keras_core/callbacks/backup_and_restore_callback.py rename to keras/callbacks/backup_and_restore_callback.py index 52f5b67d1..12cfe29d6 
100644 --- a/keras_core/callbacks/backup_and_restore_callback.py +++ b/keras/callbacks/backup_and_restore_callback.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import file_utils +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import file_utils -@keras_core_export("keras_core.callbacks.BackupAndRestore") +@keras_export("keras.callbacks.BackupAndRestore") class BackupAndRestore(Callback): """Callback to back up and restore the training state. diff --git a/keras_core/callbacks/backup_and_restore_callback_test.py b/keras/callbacks/backup_and_restore_callback_test.py similarity index 96% rename from keras_core/callbacks/backup_and_restore_callback_test.py rename to keras/callbacks/backup_and_restore_callback_test.py index a08956a85..38eefe783 100644 --- a/keras_core/callbacks/backup_and_restore_callback_test.py +++ b/keras/callbacks/backup_and_restore_callback_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -from keras_core import callbacks -from keras_core import layers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.utils import file_utils +from keras import callbacks +from keras import layers +from keras import testing +from keras.models import Sequential +from keras.utils import file_utils class InterruptingCallback(callbacks.Callback): diff --git a/keras_core/callbacks/callback.py b/keras/callbacks/callback.py similarity index 97% rename from keras_core/callbacks/callback.py rename to keras/callbacks/callback.py index c76d9c539..764118655 100644 --- a/keras_core/callbacks/callback.py +++ b/keras/callbacks/callback.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export -@keras_core_export("keras_core.callbacks.Callback") 
+@keras_export("keras.callbacks.Callback") class Callback: """Base class used to build new callbacks. @@ -10,7 +10,7 @@ class Callback: `predict()` in order to hook into the various stages of the model training, evaluation, and inference lifecycle. - To create a custom callback, subclass `keras_core.callbacks.Callback` and + To create a custom callback, subclass `keras.callbacks.Callback` and override the method associated with the stage of interest. Example: @@ -37,7 +37,7 @@ class Callback: Example: ```python - callbacks = keras_core.callbacks.CallbackList([...]) + callbacks = keras.callbacks.CallbackList([...]) callbacks.append(...) callbacks.on_train_begin(...) for epoch in range(EPOCHS): diff --git a/keras_core/callbacks/callback_list.py b/keras/callbacks/callback_list.py similarity index 94% rename from keras_core/callbacks/callback_list.py rename to keras/callbacks/callback_list.py index b76d13cad..c3048fd31 100644 --- a/keras_core/callbacks/callback_list.py +++ b/keras/callbacks/callback_list.py @@ -1,12 +1,12 @@ import tree -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.callbacks.history import History -from keras_core.callbacks.progbar_logger import ProgbarLogger +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.callbacks.history import History +from keras.callbacks.progbar_logger import ProgbarLogger -@keras_core_export("keras_core.callbacks.CallbackList") +@keras_export("keras.callbacks.CallbackList") class CallbackList(Callback): """Container abstracting a list of callbacks.""" diff --git a/keras_core/callbacks/callback_test.py b/keras/callbacks/callback_test.py similarity index 88% rename from keras_core/callbacks/callback_test.py rename to keras/callbacks/callback_test.py index b4f49ed5a..fcacf7eaa 100644 --- a/keras_core/callbacks/callback_test.py +++ b/keras/callbacks/callback_test.py @@ -1,9 +1,9 @@ import numpy as np 
import pytest -from keras_core import models -from keras_core import testing -from keras_core.callbacks.callback import Callback +from keras import models +from keras import testing +from keras.callbacks.callback import Callback class CallbackTest(testing.TestCase): diff --git a/keras_core/callbacks/csv_logger.py b/keras/callbacks/csv_logger.py similarity index 93% rename from keras_core/callbacks/csv_logger.py rename to keras/callbacks/csv_logger.py index 20e480469..baab82ade 100644 --- a/keras_core/callbacks/csv_logger.py +++ b/keras/callbacks/csv_logger.py @@ -3,12 +3,12 @@ import csv import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import file_utils +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import file_utils -@keras_core_export("keras_core.callbacks.CSVLogger") +@keras_export("keras.callbacks.CSVLogger") class CSVLogger(Callback): """Callback that streams epoch results to a CSV file. 
diff --git a/keras_core/callbacks/csv_logger_test.py b/keras/callbacks/csv_logger_test.py similarity index 96% rename from keras_core/callbacks/csv_logger_test.py rename to keras/callbacks/csv_logger_test.py index 27a2d7b57..e2361b371 100644 --- a/keras_core/callbacks/csv_logger_test.py +++ b/keras/callbacks/csv_logger_test.py @@ -6,12 +6,12 @@ import tempfile import numpy as np import pytest -from keras_core import callbacks -from keras_core import initializers -from keras_core import layers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.utils import numerical_utils +from keras import callbacks +from keras import initializers +from keras import layers +from keras import testing +from keras.models import Sequential +from keras.utils import numerical_utils TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 diff --git a/keras_core/callbacks/early_stopping.py b/keras/callbacks/early_stopping.py similarity index 94% rename from keras_core/callbacks/early_stopping.py rename to keras/callbacks/early_stopping.py index 03eafa190..90469130d 100644 --- a/keras_core/callbacks/early_stopping.py +++ b/keras/callbacks/early_stopping.py @@ -1,12 +1,12 @@ import warnings -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import io_utils +from keras import ops +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import io_utils -@keras_core_export("keras_core.callbacks.EarlyStopping") +@keras_export("keras.callbacks.EarlyStopping") class EarlyStopping(Callback): """Stop training when a monitored metric has stopped improving. @@ -52,12 +52,12 @@ class EarlyStopping(Callback): Example: - >>> callback = keras_core.callbacks.EarlyStopping(monitor='loss', + >>> callback = keras.callbacks.EarlyStopping(monitor='loss', ... 
patience=3) >>> # This callback will stop the training when there is no improvement in >>> # the loss for three consecutive epochs. - >>> model = keras_core.models.Sequential([keras_core.layers.Dense(10)]) - >>> model.compile(keras_core.optimizers.SGD(), loss='mse') + >>> model = keras.models.Sequential([keras.layers.Dense(10)]) + >>> model.compile(keras.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... epochs=10, batch_size=1, callbacks=[callback], ... verbose=0) diff --git a/keras_core/callbacks/early_stopping_test.py b/keras/callbacks/early_stopping_test.py similarity index 98% rename from keras_core/callbacks/early_stopping_test.py rename to keras/callbacks/early_stopping_test.py index 19d346894..dbc614282 100644 --- a/keras_core/callbacks/early_stopping_test.py +++ b/keras/callbacks/early_stopping_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import callbacks -from keras_core import layers -from keras_core import models -from keras_core import testing +from keras import callbacks +from keras import layers +from keras import models +from keras import testing class EarlyStoppingTest(testing.TestCase): diff --git a/keras_core/callbacks/history.py b/keras/callbacks/history.py similarity index 88% rename from keras_core/callbacks/history.py rename to keras/callbacks/history.py index c0479e333..dd2a7d488 100644 --- a/keras_core/callbacks/history.py +++ b/keras/callbacks/history.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback +from keras.api_export import keras_export +from keras.callbacks.callback import Callback -@keras_core_export("keras_core.callbacks.History") +@keras_export("keras.callbacks.History") class History(Callback): """Callback that records events into a `History` object. 
diff --git a/keras_core/callbacks/lambda_callback.py b/keras/callbacks/lambda_callback.py similarity index 95% rename from keras_core/callbacks/lambda_callback.py rename to keras/callbacks/lambda_callback.py index a89f2c942..59756aebd 100644 --- a/keras_core/callbacks/lambda_callback.py +++ b/keras/callbacks/lambda_callback.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback +from keras.api_export import keras_export +from keras.callbacks.callback import Callback -@keras_core_export("keras_core.callbacks.LambdaCallback") +@keras_export("keras.callbacks.LambdaCallback") class LambdaCallback(Callback): """Callback for creating simple, custom callbacks on-the-fly. diff --git a/keras_core/callbacks/lambda_callback_test.py b/keras/callbacks/lambda_callback_test.py similarity index 96% rename from keras_core/callbacks/lambda_callback_test.py rename to keras/callbacks/lambda_callback_test.py index d6b26c7ec..bdecdac95 100644 --- a/keras_core/callbacks/lambda_callback_test.py +++ b/keras/callbacks/lambda_callback_test.py @@ -2,12 +2,12 @@ import numpy as np import pytest from absl import logging -from keras_core import callbacks -from keras_core import layers -from keras_core import losses -from keras_core import optimizers -from keras_core import testing -from keras_core.models.sequential import Sequential +from keras import callbacks +from keras import layers +from keras import losses +from keras import optimizers +from keras import testing +from keras.models.sequential import Sequential class LambdaCallbackTest(testing.TestCase): diff --git a/keras_core/callbacks/learning_rate_scheduler.py b/keras/callbacks/learning_rate_scheduler.py similarity index 85% rename from keras_core/callbacks/learning_rate_scheduler.py rename to keras/callbacks/learning_rate_scheduler.py index a72c44661..dfedac312 100644 --- a/keras_core/callbacks/learning_rate_scheduler.py +++ b/keras/callbacks/learning_rate_scheduler.py 
@@ -1,12 +1,12 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import io_utils +from keras import backend +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import io_utils -@keras_core_export("keras_core.callbacks.LearningRateScheduler") +@keras_export("keras.callbacks.LearningRateScheduler") class LearningRateScheduler(Callback): """Learning rate scheduler. @@ -31,12 +31,12 @@ class LearningRateScheduler(Callback): ... else: ... return lr * ops.exp(-0.1) >>> - >>> model = keras_core.models.Sequential([keras_core.layers.Dense(10)]) - >>> model.compile(keras_core.optimizers.SGD(), loss='mse') + >>> model = keras.models.Sequential([keras.layers.Dense(10)]) + >>> model.compile(keras.optimizers.SGD(), loss='mse') >>> round(model.optimizer.learning_rate, 5) 0.01 - >>> callback = keras_core.callbacks.LearningRateScheduler(scheduler) + >>> callback = keras.callbacks.LearningRateScheduler(scheduler) >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... 
epochs=15, callbacks=[callback], verbose=0) >>> round(model.optimizer.learning_rate, 5) diff --git a/keras_core/callbacks/learning_rate_scheduler_test.py b/keras/callbacks/learning_rate_scheduler_test.py similarity index 92% rename from keras_core/callbacks/learning_rate_scheduler_test.py rename to keras/callbacks/learning_rate_scheduler_test.py index 633e2e68b..727682561 100644 --- a/keras_core/callbacks/learning_rate_scheduler_test.py +++ b/keras/callbacks/learning_rate_scheduler_test.py @@ -1,13 +1,13 @@ import pytest -from keras_core import callbacks -from keras_core import layers -from keras_core import optimizers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.testing import test_utils -from keras_core.utils import io_utils -from keras_core.utils import numerical_utils +from keras import callbacks +from keras import layers +from keras import optimizers +from keras import testing +from keras.models import Sequential +from keras.testing import test_utils +from keras.utils import io_utils +from keras.utils import numerical_utils class LearningRateSchedulerTest(testing.TestCase): diff --git a/keras_core/callbacks/model_checkpoint.py b/keras/callbacks/model_checkpoint.py similarity index 97% rename from keras_core/callbacks/model_checkpoint.py rename to keras/callbacks/model_checkpoint.py index 5165e7ffb..ec78376af 100644 --- a/keras_core/callbacks/model_checkpoint.py +++ b/keras/callbacks/model_checkpoint.py @@ -4,13 +4,13 @@ import warnings import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import file_utils -from keras_core.utils import io_utils +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import file_utils +from keras.utils import io_utils -@keras_core_export("keras_core.callbacks.ModelCheckpoint") +@keras_export("keras.callbacks.ModelCheckpoint") class 
ModelCheckpoint(Callback): """Callback to save the Keras model or model weights at some frequency. @@ -38,7 +38,7 @@ class ModelCheckpoint(Callback): EPOCHS = 10 checkpoint_filepath = '/tmp/ckpt/checkpoint.model.keras' - model_checkpoint_callback = keras_core.callbacks.ModelCheckpoint( + model_checkpoint_callback = keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath, monitor='val_accuracy', mode='max', @@ -48,11 +48,11 @@ class ModelCheckpoint(Callback): model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback]) # The model (that are considered the best) can be loaded as - - keras_core.models.load_model(checkpoint_filepath) + keras.models.load_model(checkpoint_filepath) # Alternatively, one could checkpoint just the model weights as - checkpoint_filepath = '/tmp/ckpt/checkpoint.weights.h5' - model_checkpoint_callback = keras_core.callbacks.ModelCheckpoint( + model_checkpoint_callback = keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath, save_weights_only=True, monitor='val_accuracy', diff --git a/keras_core/callbacks/model_checkpoint_test.py b/keras/callbacks/model_checkpoint_test.py similarity index 98% rename from keras_core/callbacks/model_checkpoint_test.py rename to keras/callbacks/model_checkpoint_test.py index 08f17072c..bea73de7b 100644 --- a/keras_core/callbacks/model_checkpoint_test.py +++ b/keras/callbacks/model_checkpoint_test.py @@ -3,15 +3,15 @@ import warnings import pytest -from keras_core import callbacks -from keras_core import layers -from keras_core import metrics -from keras_core import models -from keras_core import saving -from keras_core import testing -from keras_core.models import Sequential -from keras_core.testing import test_utils -from keras_core.utils import numerical_utils +from keras import callbacks +from keras import layers +from keras import metrics +from keras import models +from keras import saving +from keras import testing +from keras.models import Sequential +from keras.testing import test_utils 
+from keras.utils import numerical_utils try: import h5py diff --git a/keras_core/callbacks/progbar_logger.py b/keras/callbacks/progbar_logger.py similarity index 92% rename from keras_core/callbacks/progbar_logger.py rename to keras/callbacks/progbar_logger.py index fe3c49f04..be82f8a4f 100644 --- a/keras_core/callbacks/progbar_logger.py +++ b/keras/callbacks/progbar_logger.py @@ -1,10 +1,10 @@ -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import io_utils -from keras_core.utils.progbar import Progbar +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import io_utils +from keras.utils.progbar import Progbar -@keras_core_export("keras_core.callbacks.ProgbarLogger") +@keras_export("keras.callbacks.ProgbarLogger") class ProgbarLogger(Callback): """Callback that prints metrics to stdout. diff --git a/keras_core/callbacks/reduce_lr_on_plateau.py b/keras/callbacks/reduce_lr_on_plateau.py similarity index 95% rename from keras_core/callbacks/reduce_lr_on_plateau.py rename to keras/callbacks/reduce_lr_on_plateau.py index ade530f96..c5a7f8eec 100644 --- a/keras_core/callbacks/reduce_lr_on_plateau.py +++ b/keras/callbacks/reduce_lr_on_plateau.py @@ -2,13 +2,13 @@ import warnings import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import io_utils +from keras import backend +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import io_utils -@keras_core_export("keras_core.callbacks.ReduceLROnPlateau") +@keras_export("keras.callbacks.ReduceLROnPlateau") class ReduceLROnPlateau(Callback): """Reduce learning rate when a metric has stopped improving. 
diff --git a/keras_core/callbacks/reduce_lr_on_plateau_test.py b/keras/callbacks/reduce_lr_on_plateau_test.py similarity index 93% rename from keras_core/callbacks/reduce_lr_on_plateau_test.py rename to keras/callbacks/reduce_lr_on_plateau_test.py index 015395d3a..10a8b0f47 100644 --- a/keras_core/callbacks/reduce_lr_on_plateau_test.py +++ b/keras/callbacks/reduce_lr_on_plateau_test.py @@ -1,13 +1,13 @@ import pytest -from keras_core import callbacks -from keras_core import layers -from keras_core import optimizers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.testing import test_utils -from keras_core.utils import io_utils -from keras_core.utils import numerical_utils +from keras import callbacks +from keras import layers +from keras import optimizers +from keras import testing +from keras.models import Sequential +from keras.testing import test_utils +from keras.utils import io_utils +from keras.utils import numerical_utils class ReduceLROnPlateauTest(testing.TestCase): diff --git a/keras_core/callbacks/remote_monitor.py b/keras/callbacks/remote_monitor.py similarity index 94% rename from keras_core/callbacks/remote_monitor.py rename to keras/callbacks/remote_monitor.py index 328b0a9e6..2d1a0a533 100644 --- a/keras_core/callbacks/remote_monitor.py +++ b/keras/callbacks/remote_monitor.py @@ -3,8 +3,8 @@ import warnings import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback +from keras.api_export import keras_export +from keras.callbacks.callback import Callback try: import requests @@ -12,7 +12,7 @@ except ImportError: requests = None -@keras_core_export("keras_core.callbacks.RemoteMonitor") +@keras_export("keras.callbacks.RemoteMonitor") class RemoteMonitor(Callback): """Callback used to stream events to a server. 
diff --git a/keras_core/callbacks/remote_monitor_test.py b/keras/callbacks/remote_monitor_test.py similarity index 94% rename from keras_core/callbacks/remote_monitor_test.py rename to keras/callbacks/remote_monitor_test.py index a53fb7cab..310358042 100644 --- a/keras_core/callbacks/remote_monitor_test.py +++ b/keras/callbacks/remote_monitor_test.py @@ -3,12 +3,12 @@ from unittest import mock import numpy as np -from keras_core import backend -from keras_core import callbacks -from keras_core import layers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.utils import numerical_utils +from keras import backend +from keras import callbacks +from keras import layers +from keras import testing +from keras.models import Sequential +from keras.utils import numerical_utils try: import requests diff --git a/keras_core/callbacks/tensorboard.py b/keras/callbacks/tensorboard.py similarity index 96% rename from keras_core/callbacks/tensorboard.py rename to keras/callbacks/tensorboard.py index 425707f4b..b45204445 100644 --- a/keras_core/callbacks/tensorboard.py +++ b/keras/callbacks/tensorboard.py @@ -6,16 +6,16 @@ import warnings import tree -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.layers import Embedding -from keras_core.optimizers import Optimizer -from keras_core.utils import file_utils +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.layers import Embedding +from keras.optimizers import Optimizer +from keras.utils import file_utils -@keras_core_export("keras_core.callbacks.TensorBoard") +@keras_export("keras.callbacks.TensorBoard") class TensorBoard(Callback): """Enable visualizations for TensorBoard. 
@@ -94,7 +94,7 @@ class TensorBoard(Callback): Basic usage: ```python - tensorboard_callback = keras_core.callbacks.TensorBoard(log_dir="./logs") + tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./logs") model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) # Then run the tensorboard command to view the visualizations. ``` @@ -102,10 +102,10 @@ class TensorBoard(Callback): Custom batch-level summaries in a subclassed Model: ```python - class MyModel(keras_core.Model): + class MyModel(keras.Model): def build(self, _): - self.dense = keras_core.layers.Dense(10) + self.dense = keras.layers.Dense(10) def call(self, x): outputs = self.dense(x) @@ -118,7 +118,7 @@ class TensorBoard(Callback): # Make sure to set `update_freq=N` to log a batch-level summary every N # batches. In addition to any `tf.summary` contained in `model.call()`, # metrics added in `Model.compile` will be logged every N batches. - tb_callback = keras_core.callbacks.TensorBoard('./logs', update_freq=1) + tb_callback = keras.callbacks.TensorBoard('./logs', update_freq=1) model.fit(x_train, y_train, callbacks=[tb_callback]) ``` @@ -129,16 +129,16 @@ class TensorBoard(Callback): tf.summary.histogram('x', x) return x - inputs = keras_core.Input(10) - x = keras_core.layers.Dense(10)(inputs) - outputs = keras_core.layers.Lambda(my_summary)(x) - model = keras_core.Model(inputs, outputs) + inputs = keras.Input(10) + x = keras.layers.Dense(10)(inputs) + outputs = keras.layers.Lambda(my_summary)(x) + model = keras.Model(inputs, outputs) model.compile('sgd', 'mse') # Make sure to set `update_freq=N` to log a batch-level summary every N # batches. In addition to any `tf.summary` contained in `Model.call`, # metrics added in `Model.compile` will be logged every N batches. 
- tb_callback = keras_core.callbacks.TensorBoard('./logs', update_freq=1) + tb_callback = keras.callbacks.TensorBoard('./logs', update_freq=1) model.fit(x_train, y_train, callbacks=[tb_callback]) ``` @@ -146,12 +146,12 @@ class TensorBoard(Callback): ```python # Profile a single batch, e.g. the 5th batch. - tensorboard_callback = keras_core.callbacks.TensorBoard( + tensorboard_callback = keras.callbacks.TensorBoard( log_dir='./logs', profile_batch=5) model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) # Profile a range of batches, e.g. from 10 to 20. - tensorboard_callback = keras_core.callbacks.TensorBoard( + tensorboard_callback = keras.callbacks.TensorBoard( log_dir='./logs', profile_batch=(10,20)) model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) ``` @@ -301,7 +301,7 @@ class TensorBoard(Callback): ): raise ValueError( "Unrecognized `Embedding` layer names passed to " - "`keras_core.callbacks.TensorBoard` `embeddings_metadata` " + "`keras.callbacks.TensorBoard` `embeddings_metadata` " f"argument: {self.embeddings_metadata.keys()}" ) diff --git a/keras_core/callbacks/tensorboard_test.py b/keras/callbacks/tensorboard_test.py similarity index 98% rename from keras_core/callbacks/tensorboard_test.py rename to keras/callbacks/tensorboard_test.py index 7cddda3c9..62b6e59fe 100644 --- a/keras_core/callbacks/tensorboard_test.py +++ b/keras/callbacks/tensorboard_test.py @@ -9,15 +9,15 @@ from tensorflow.compat.v1 import SummaryMetadata from tensorflow.core.util import event_pb2 from tensorflow.python.lib.io import tf_record -from keras_core import backend -from keras_core import callbacks -from keras_core import layers -from keras_core import losses -from keras_core import models -from keras_core import ops -from keras_core import optimizers -from keras_core import testing -from keras_core.optimizers import schedules +from keras import backend +from keras import callbacks +from keras import layers +from keras import losses +from 
keras import models +from keras import ops +from keras import optimizers +from keras import testing +from keras.optimizers import schedules # Note: this file and tensorboard in general has a dependency on tensorflow diff --git a/keras_core/callbacks/terminate_on_nan.py b/keras/callbacks/terminate_on_nan.py similarity index 71% rename from keras_core/callbacks/terminate_on_nan.py rename to keras/callbacks/terminate_on_nan.py index 864aa9cf2..20eb5f2b3 100644 --- a/keras_core/callbacks/terminate_on_nan.py +++ b/keras/callbacks/terminate_on_nan.py @@ -1,11 +1,11 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.callbacks.callback import Callback -from keras_core.utils import io_utils +from keras.api_export import keras_export +from keras.callbacks.callback import Callback +from keras.utils import io_utils -@keras_core_export("keras_core.callbacks.TerminateOnNaN") +@keras_export("keras.callbacks.TerminateOnNaN") class TerminateOnNaN(Callback): """Callback that terminates training when a NaN loss is encountered.""" diff --git a/keras_core/callbacks/terminate_on_nan_test.py b/keras/callbacks/terminate_on_nan_test.py similarity index 87% rename from keras_core/callbacks/terminate_on_nan_test.py rename to keras/callbacks/terminate_on_nan_test.py index 8d61171c9..39b6cba51 100644 --- a/keras_core/callbacks/terminate_on_nan_test.py +++ b/keras/callbacks/terminate_on_nan_test.py @@ -1,12 +1,12 @@ import numpy as np import pytest -from keras_core import callbacks -from keras_core import initializers -from keras_core import layers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.utils import numerical_utils +from keras import callbacks +from keras import initializers +from keras import layers +from keras import testing +from keras.models import Sequential +from keras.utils import numerical_utils class TerminateOnNaNTest(testing.TestCase): diff --git a/keras_core/constraints/__init__.py 
b/keras/constraints/__init__.py similarity index 67% rename from keras_core/constraints/__init__.py rename to keras/constraints/__init__.py index f36900b46..49c5c8bd2 100644 --- a/keras_core/constraints/__init__.py +++ b/keras/constraints/__init__.py @@ -1,13 +1,13 @@ import inspect -from keras_core.api_export import keras_core_export -from keras_core.constraints.constraints import Constraint -from keras_core.constraints.constraints import MaxNorm -from keras_core.constraints.constraints import MinMaxNorm -from keras_core.constraints.constraints import NonNeg -from keras_core.constraints.constraints import UnitNorm -from keras_core.saving import serialization_lib -from keras_core.utils.naming import to_snake_case +from keras.api_export import keras_export +from keras.constraints.constraints import Constraint +from keras.constraints.constraints import MaxNorm +from keras.constraints.constraints import MinMaxNorm +from keras.constraints.constraints import NonNeg +from keras.constraints.constraints import UnitNorm +from keras.saving import serialization_lib +from keras.utils.naming import to_snake_case ALL_OBJECTS = { Constraint, @@ -23,12 +23,12 @@ ALL_OBJECTS_DICT.update( ) -@keras_core_export("keras_core.constraints.serialize") +@keras_export("keras.constraints.serialize") def serialize(constraint): return serialization_lib.serialize_keras_object(constraint) -@keras_core_export("keras_core.constraints.deserialize") +@keras_export("keras.constraints.deserialize") def deserialize(config, custom_objects=None): """Return a Keras constraint object via its config.""" return serialization_lib.deserialize_keras_object( @@ -38,7 +38,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.constraints.get") +@keras_export("keras.constraints.get") def get(identifier): """Retrieve a Keras constraint object via an identifier.""" if identifier is None: diff --git a/keras_core/constraints/constraints.py b/keras/constraints/constraints.py similarity 
index 89% rename from keras_core/constraints/constraints.py rename to keras/constraints/constraints.py index 55964fa4c..5740fb762 100644 --- a/keras_core/constraints/constraints.py +++ b/keras/constraints/constraints.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export +from keras import backend +from keras import ops +from keras.api_export import keras_export -@keras_core_export("keras_core.constraints.Constraint") +@keras_export("keras.constraints.Constraint") class Constraint: """Base class for weight constraints. @@ -16,7 +16,7 @@ class Constraint: Here's a simple example of a non-negative weight constraint: - >>> class NonNegative(keras_core.constraints.Constraint): + >>> class NonNegative(keras.constraints.Constraint): ... ... def __call__(self, w): ... return w * ops.cast(ops.greater_equal(w, 0.), dtype=w.dtype) @@ -27,7 +27,7 @@ class Constraint: Usage in a layer: - >>> keras_core.layers.Dense(4, kernel_constraint=NonNegative()) + >>> keras.layers.Dense(4, kernel_constraint=NonNegative()) """ def __call__(self, w): @@ -72,13 +72,13 @@ class Constraint: config: A Python dictionary, the output of `get_config()`. Returns: - A `keras_core.constraints.Constraint` instance. + A `keras.constraints.Constraint` instance. """ return cls(**config) -@keras_core_export( - ["keras_core.constraints.MaxNorm", "keras_core.constraints.max_norm"] +@keras_export( + ["keras.constraints.MaxNorm", "keras.constraints.max_norm"] ) class MaxNorm(Constraint): """MaxNorm weight constraint. @@ -86,7 +86,7 @@ class MaxNorm(Constraint): Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value. - Also available via the shortcut function `keras_core.constraints.max_norm`. + Also available via the shortcut function `keras.constraints.max_norm`. Args: max_value: the maximum norm value for the incoming weights. 
@@ -118,8 +118,8 @@ class MaxNorm(Constraint): return {"max_value": self.max_value, "axis": self.axis} -@keras_core_export( - ["keras_core.constraints.NonNeg", "keras_core.constraints.non_neg"] +@keras_export( + ["keras.constraints.NonNeg", "keras.constraints.non_neg"] ) class NonNeg(Constraint): """Constrains the weights to be non-negative.""" @@ -129,8 +129,8 @@ class NonNeg(Constraint): return w * ops.cast(ops.greater_equal(w, 0.0), dtype=w.dtype) -@keras_core_export( - ["keras_core.constraints.UnitNorm", "keras_core.constraints.unit_norm"] +@keras_export( + ["keras.constraints.UnitNorm", "keras.constraints.unit_norm"] ) class UnitNorm(Constraint): """Constrains the weights incident to each hidden unit to have unit norm. @@ -163,8 +163,8 @@ class UnitNorm(Constraint): return {"axis": self.axis} -@keras_core_export( - ["keras_core.constraints.MinMaxNorm", "keras_core.constraints.min_max_norm"] +@keras_export( + ["keras.constraints.MinMaxNorm", "keras.constraints.min_max_norm"] ) class MinMaxNorm(Constraint): """MinMaxNorm weight constraint. 
diff --git a/keras_core/constraints/constraints_test.py b/keras/constraints/constraints_test.py similarity index 95% rename from keras_core/constraints/constraints_test.py rename to keras/constraints/constraints_test.py index 509cdee48..338d495fc 100644 --- a/keras_core/constraints/constraints_test.py +++ b/keras/constraints/constraints_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras_core import backend -from keras_core import constraints -from keras_core import testing +from keras import backend +from keras import constraints +from keras import testing def get_example_array(): diff --git a/keras/datasets/__init__.py b/keras/datasets/__init__.py new file mode 100644 index 000000000..8fb280792 --- /dev/null +++ b/keras/datasets/__init__.py @@ -0,0 +1,10 @@ +"""Small NumPy datasets for debugging/testing.""" + +from keras.datasets import boston_housing +from keras.datasets import california_housing +from keras.datasets import cifar10 +from keras.datasets import cifar100 +from keras.datasets import fashion_mnist +from keras.datasets import imdb +from keras.datasets import mnist +from keras.datasets import reuters diff --git a/keras_core/datasets/boston_housing.py b/keras/datasets/boston_housing.py similarity index 60% rename from keras_core/datasets/boston_housing.py rename to keras/datasets/boston_housing.py index b442a6cdf..52be266a8 100644 --- a/keras_core/datasets/boston_housing.py +++ b/keras/datasets/boston_housing.py @@ -1,11 +1,11 @@ -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export -@keras_core_export("keras_core.datasets.boston_housing.load_data") +@keras_export("keras.datasets.boston_housing.load_data") def load_data(path="boston_housing.npz", test_split=0.2, seed=113): raise NotImplementedError( "The Boston Housing dataset is no longer distributed with Keras. " "We recommend that you use instead the " "California Housing dataset, available via " - "`keras_core.datasets.california_housing.load_data()`." 
+ "`keras.datasets.california_housing.load_data()`." ) diff --git a/keras_core/datasets/california_housing.py b/keras/datasets/california_housing.py similarity index 94% rename from keras_core/datasets/california_housing.py rename to keras/datasets/california_housing.py index 852bda725..fefae7d39 100644 --- a/keras_core/datasets/california_housing.py +++ b/keras/datasets/california_housing.py @@ -2,11 +2,11 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.file_utils import get_file +from keras.api_export import keras_export +from keras.utils.file_utils import get_file -@keras_core_export("keras_core.datasets.boston_housing.load_data") +@keras_export("keras.datasets.boston_housing.load_data") def load_data(path="california_housing.npz", test_split=0.2, seed=113): """Loads the California Housing dataset. diff --git a/keras_core/datasets/cifar.py b/keras/datasets/cifar.py similarity index 100% rename from keras_core/datasets/cifar.py rename to keras/datasets/cifar.py diff --git a/keras_core/datasets/cifar10.py b/keras/datasets/cifar10.py similarity index 92% rename from keras_core/datasets/cifar10.py rename to keras/datasets/cifar10.py index 4c5069ad2..2010bfca0 100644 --- a/keras_core/datasets/cifar10.py +++ b/keras/datasets/cifar10.py @@ -4,13 +4,13 @@ import os import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.datasets.cifar import load_batch -from keras_core.utils.file_utils import get_file +from keras import backend +from keras.api_export import keras_export +from keras.datasets.cifar import load_batch +from keras.utils.file_utils import get_file -@keras_core_export("keras_core.datasets.cifar10.load_data") +@keras_export("keras.datasets.cifar10.load_data") def load_data(): """Loads the CIFAR10 dataset. 
diff --git a/keras_core/datasets/cifar100.py b/keras/datasets/cifar100.py similarity index 91% rename from keras_core/datasets/cifar100.py rename to keras/datasets/cifar100.py index a1d57cc67..a09cc0398 100644 --- a/keras_core/datasets/cifar100.py +++ b/keras/datasets/cifar100.py @@ -4,13 +4,13 @@ import os import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.datasets.cifar import load_batch -from keras_core.utils.file_utils import get_file +from keras import backend +from keras.api_export import keras_export +from keras.datasets.cifar import load_batch +from keras.utils.file_utils import get_file -@keras_core_export("keras_core.datasets.cifar100.load_data") +@keras_export("keras.datasets.cifar100.load_data") def load_data(label_mode="fine"): """Loads the CIFAR100 dataset. diff --git a/keras_core/datasets/fashion_mnist.py b/keras/datasets/fashion_mnist.py similarity index 94% rename from keras_core/datasets/fashion_mnist.py rename to keras/datasets/fashion_mnist.py index ed6f05ff6..286a7b18e 100644 --- a/keras_core/datasets/fashion_mnist.py +++ b/keras/datasets/fashion_mnist.py @@ -5,11 +5,11 @@ import os import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.file_utils import get_file +from keras.api_export import keras_export +from keras.utils.file_utils import get_file -@keras_core_export("keras_core.datasets.fashion_mnist.load_data") +@keras_export("keras.datasets.fashion_mnist.load_data") def load_data(): """Loads the Fashion-MNIST dataset. 
diff --git a/keras_core/datasets/imdb.py b/keras/datasets/imdb.py similarity index 96% rename from keras_core/datasets/imdb.py rename to keras/datasets/imdb.py index 20fb1894e..cfff6b8ac 100644 --- a/keras_core/datasets/imdb.py +++ b/keras/datasets/imdb.py @@ -4,12 +4,12 @@ import json import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.file_utils import get_file -from keras_core.utils.python_utils import remove_long_seq +from keras.api_export import keras_export +from keras.utils.file_utils import get_file +from keras.utils.python_utils import remove_long_seq -@keras_core_export("keras_core.datasets.imdb.load_data") +@keras_export("keras.datasets.imdb.load_data") def load_data( path="imdb.npz", num_words=None, @@ -137,7 +137,7 @@ def load_data( return (x_train, y_train), (x_test, y_test) -@keras_core_export("keras_core.datasets.imdb.get_word_index") +@keras_export("keras.datasets.imdb.get_word_index") def get_word_index(path="imdb_word_index.json"): """Retrieves a dict mapping words to their index in the IMDB dataset. diff --git a/keras_core/datasets/mnist.py b/keras/datasets/mnist.py similarity index 93% rename from keras_core/datasets/mnist.py rename to keras/datasets/mnist.py index c8b9c60ae..74d6b0d4f 100644 --- a/keras_core/datasets/mnist.py +++ b/keras/datasets/mnist.py @@ -2,11 +2,11 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.file_utils import get_file +from keras.api_export import keras_export +from keras.utils.file_utils import get_file -@keras_core_export("keras_core.datasets.mnist.load_data") +@keras_export("keras.datasets.mnist.load_data") def load_data(path="mnist.npz"): """Loads the MNIST dataset. 
diff --git a/keras_core/datasets/reuters.py b/keras/datasets/reuters.py similarity index 95% rename from keras_core/datasets/reuters.py rename to keras/datasets/reuters.py index 74559c0e2..c94db532f 100644 --- a/keras_core/datasets/reuters.py +++ b/keras/datasets/reuters.py @@ -4,12 +4,12 @@ import json import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.file_utils import get_file -from keras_core.utils.python_utils import remove_long_seq +from keras.api_export import keras_export +from keras.utils.file_utils import get_file +from keras.utils.python_utils import remove_long_seq -@keras_core_export("keras_core.datasets.reuters.load_data") +@keras_export("keras.datasets.reuters.load_data") def load_data( path="reuters.npz", num_words=None, @@ -132,7 +132,7 @@ def load_data( return (x_train, y_train), (x_test, y_test) -@keras_core_export("keras_core.datasets.reuters.get_word_index") +@keras_export("keras.datasets.reuters.get_word_index") def get_word_index(path="reuters_word_index.json"): """Retrieves a dict mapping words to their index in the Reuters dataset. @@ -162,7 +162,7 @@ def get_word_index(path="reuters_word_index.json"): return json.load(f) -@keras_core_export("keras_core.datasets.reuters.get_label_names") +@keras_export("keras.datasets.reuters.get_label_names") def get_label_names(): """Returns labels as a list of strings with indices matching training data. 
diff --git a/keras_core/distribution/__init__.py b/keras/distribution/__init__.py similarity index 100% rename from keras_core/distribution/__init__.py rename to keras/distribution/__init__.py diff --git a/keras_core/distribution/distribution_lib.py b/keras/distribution/distribution_lib.py similarity index 96% rename from keras_core/distribution/distribution_lib.py rename to keras/distribution/distribution_lib.py index 4b9cd1c99..a9f0d76e6 100644 --- a/keras_core/distribution/distribution_lib.py +++ b/keras/distribution/distribution_lib.py @@ -13,15 +13,15 @@ import warnings import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.backend import distribution_lib -from keras_core.backend.common import global_state +from keras.api_export import keras_export +from keras.backend import distribution_lib +from keras.backend.common import global_state DEFAULT_BATCH_DIM_NAME = "batch" GLOBAL_ATTRIBUTE_NAME = "distribution" -@keras_core_export("keras_core.distribution.list_devices") +@keras_export("keras.distribution.list_devices") def list_devices(device_type=None): """Return all the available devices based on the device type. @@ -39,7 +39,7 @@ def list_devices(device_type=None): return distribution_lib.list_devices(device_type) -@keras_core_export("keras_core.distribution.DeviceMesh") +@keras_export("keras.distribution.DeviceMesh") class DeviceMesh: """A cluster of computation devices for distributed computation. @@ -61,7 +61,7 @@ class DeviceMesh: match/create the `TensorLayout` when distribute the data and variables. devices: Optional list of devices. Defaults to all the available - devices locally from `keras_core.distribution.list_devices()`. + devices locally from `keras.distribution.list_devices()`. 
""" def __init__( @@ -108,7 +108,7 @@ class DeviceMesh: return self._devices -@keras_core_export("keras_core.distribution.TensorLayout") +@keras_export("keras.distribution.TensorLayout") class TensorLayout: """A layout to apply to a tensor. @@ -220,7 +220,7 @@ class Distribution: return self._device_mesh -@keras_core_export("keras_core.distribution.DataParallel") +@keras_export("keras.distribution.DataParallel") class DataParallel(Distribution): """Distribution for data parallelism. @@ -297,7 +297,7 @@ class DataParallel(Distribution): return TensorLayout(variable_shard_spec, self.device_mesh) -@keras_core_export("keras_core.distribution.ModelParallel") +@keras_export("keras.distribution.ModelParallel") class ModelParallel(Distribution): """Distribution that shards model variables. @@ -394,7 +394,7 @@ class ModelParallel(Distribution): return TensorLayout(variable_shard_spec, self.device_mesh) -@keras_core_export("keras_core.distribution.LayoutMap") +@keras_export("keras.distribution.LayoutMap") class LayoutMap(collections.abc.MutableMapping): """A dict-like object that maps string to `TensorLayout` instances. @@ -491,13 +491,13 @@ class LayoutMap(collections.abc.MutableMapping): layout.device_mesh = self.device_mesh -@keras_core_export("keras_core.distribution.distribution") +@keras_export("keras.distribution.distribution") def distribution(): """Retrieve the current distribution from global context.""" return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME) -@keras_core_export("keras_core.distribution.set_distribution") +@keras_export("keras.distribution.set_distribution") def set_distribution(value): """Set the distribution as the global distribution setting. 
diff --git a/keras_core/distribution/distribution_lib_test.py b/keras/distribution/distribution_lib_test.py similarity index 98% rename from keras_core/distribution/distribution_lib_test.py rename to keras/distribution/distribution_lib_test.py index ab8f4f128..9aa2241c4 100644 --- a/keras_core/distribution/distribution_lib_test.py +++ b/keras/distribution/distribution_lib_test.py @@ -7,12 +7,12 @@ import jax import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.backend import distribution_lib as backend_dlib -from keras_core.distribution import distribution_lib +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.backend import distribution_lib as backend_dlib +from keras.distribution import distribution_lib if backend.backend() == "jax": # Due to https://github.com/google/jax/issues/17188, we can't diff --git a/keras/export/__init__.py b/keras/export/__init__.py new file mode 100644 index 000000000..f59fb3dd0 --- /dev/null +++ b/keras/export/__init__.py @@ -0,0 +1 @@ +from keras.export.export_lib import ExportArchive diff --git a/keras_core/export/export_lib.py b/keras/export/export_lib.py similarity index 97% rename from keras_core/export/export_lib.py rename to keras/export/export_lib.py index 7af83a403..8796d8e3c 100644 --- a/keras_core/export/export_lib.py +++ b/keras/export/export_lib.py @@ -1,15 +1,15 @@ """Library for exporting inference-only Keras models/layers.""" -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers import Layer -from keras_core.models import Functional -from keras_core.models import Sequential -from keras_core.utils import io_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers import Layer +from 
keras.models import Functional +from keras.models import Sequential +from keras.utils import io_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.export.ExportArchive") +@keras_export("keras.export.ExportArchive") class ExportArchive: """ExportArchive is used to write SavedModel artifacts (e.g. for inference). @@ -215,7 +215,7 @@ class ExportArchive: a Functional model with 2 inputs): ```python - model = keras_core.Model(inputs=[x1, x2], outputs=outputs) + model = keras.Model(inputs=[x1, x2], outputs=outputs) export_archive = ExportArchive() export_archive.track(model) @@ -234,7 +234,7 @@ class ExportArchive: This also works with dictionary inputs: ```python - model = keras_core.Model(inputs={"x1": x1, "x2": x2}, outputs=outputs) + model = keras.Model(inputs={"x1": x1, "x2": x2}, outputs=outputs) export_archive = ExportArchive() export_archive.track(model) @@ -345,7 +345,7 @@ class ExportArchive: f"Received instead object of type '{type(variables)}'." ) # Ensure that all variables added are either tf.Variables - # or Variables created by Keras Core with the TF or JAX backends. + # or Variables created by Keras 3 with the TF or JAX backends. if not all( isinstance(v, (tf.Variable, backend.Variable)) for v in variables ): @@ -434,9 +434,9 @@ class ExportArchive: # Next, track lookup tables. # Hopefully, one day this will be automated at the tf.function level. 
self._tf_trackable._misc_assets = [] - from keras_core.layers import IntegerLookup - from keras_core.layers import StringLookup - from keras_core.layers import TextVectorization + from keras.layers import IntegerLookup + from keras.layers import StringLookup + from keras.layers import TextVectorization if hasattr(self, "_tracked"): for root in self._tracked: @@ -501,7 +501,7 @@ def _get_save_spec(model): return specs -@keras_core_export("keras_core.layers.TFSMLayer") +@keras_export("keras.layers.TFSMLayer") class TFSMLayer(Layer): """Reload a Keras model/layer that was saved via SavedModel / ExportArchive. diff --git a/keras_core/export/export_lib_test.py b/keras/export/export_lib_test.py similarity index 99% rename from keras_core/export/export_lib_test.py rename to keras/export/export_lib_test.py index 91f5c48a8..d65ad0c19 100644 --- a/keras_core/export/export_lib_test.py +++ b/keras/export/export_lib_test.py @@ -6,13 +6,13 @@ import numpy as np import pytest import tensorflow as tf -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core import utils -from keras_core.export import export_lib -from keras_core.saving import saving_lib +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras import utils +from keras.export import export_lib +from keras.saving import saving_lib def get_model(): diff --git a/keras_core/initializers/__init__.py b/keras/initializers/__init__.py similarity index 59% rename from keras_core/initializers/__init__.py rename to keras/initializers/__init__.py index 07cfbc6a8..e9019a2d4 100644 --- a/keras_core/initializers/__init__.py +++ b/keras/initializers/__init__.py @@ -1,24 +1,24 @@ import inspect -from keras_core.api_export import keras_core_export -from keras_core.initializers.constant_initializers import Constant -from keras_core.initializers.constant_initializers import Identity -from 
keras_core.initializers.constant_initializers import Ones -from keras_core.initializers.constant_initializers import Zeros -from keras_core.initializers.initializer import Initializer -from keras_core.initializers.random_initializers import GlorotNormal -from keras_core.initializers.random_initializers import GlorotUniform -from keras_core.initializers.random_initializers import HeNormal -from keras_core.initializers.random_initializers import HeUniform -from keras_core.initializers.random_initializers import LecunNormal -from keras_core.initializers.random_initializers import LecunUniform -from keras_core.initializers.random_initializers import OrthogonalInitializer -from keras_core.initializers.random_initializers import RandomNormal -from keras_core.initializers.random_initializers import RandomUniform -from keras_core.initializers.random_initializers import TruncatedNormal -from keras_core.initializers.random_initializers import VarianceScaling -from keras_core.saving import serialization_lib -from keras_core.utils.naming import to_snake_case +from keras.api_export import keras_export +from keras.initializers.constant_initializers import Constant +from keras.initializers.constant_initializers import Identity +from keras.initializers.constant_initializers import Ones +from keras.initializers.constant_initializers import Zeros +from keras.initializers.initializer import Initializer +from keras.initializers.random_initializers import GlorotNormal +from keras.initializers.random_initializers import GlorotUniform +from keras.initializers.random_initializers import HeNormal +from keras.initializers.random_initializers import HeUniform +from keras.initializers.random_initializers import LecunNormal +from keras.initializers.random_initializers import LecunUniform +from keras.initializers.random_initializers import OrthogonalInitializer +from keras.initializers.random_initializers import RandomNormal +from keras.initializers.random_initializers import RandomUniform 
+from keras.initializers.random_initializers import TruncatedNormal +from keras.initializers.random_initializers import VarianceScaling +from keras.saving import serialization_lib +from keras.utils.naming import to_snake_case ALL_OBJECTS = { Initializer, @@ -54,13 +54,13 @@ ALL_OBJECTS_DICT.update( ) -@keras_core_export("keras_core.initializers.serialize") +@keras_export("keras.initializers.serialize") def serialize(initializer): """Returns the initializer configuration as a Python dict.""" return serialization_lib.serialize_keras_object(initializer) -@keras_core_export("keras_core.initializers.deserialize") +@keras_export("keras.initializers.deserialize") def deserialize(config, custom_objects=None): """Returns a Keras initializer object via its configuration.""" return serialization_lib.deserialize_keras_object( @@ -70,7 +70,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.initializers.get") +@keras_export("keras.initializers.get") def get(identifier): """Retrieves a Keras initializer object via an identifier. @@ -78,16 +78,16 @@ def get(identifier): (case-sensitively). >>> identifier = 'Ones' - >>> keras_core.initializers.deserialize(identifier) - <...keras_core.initializers.initializers.Ones...> + >>> keras.initializers.deserialize(identifier) + <...keras.initializers.initializers.Ones...> You can also specify `config` of the initializer to this function by passing dict containing `class_name` and `config` as an identifier. Also note that the `class_name` must map to a `Initializer` class. >>> cfg = {'class_name': 'Ones', 'config': {}} - >>> keras_core.initializers.deserialize(cfg) - <...keras_core.initializers.initializers.Ones...> + >>> keras.initializers.deserialize(cfg) + <...keras.initializers.initializers.Ones...> In the case that the `identifier` is a class, this method will return a new instance of the class by its constructor. 
diff --git a/keras_core/initializers/constant_initializers.py b/keras/initializers/constant_initializers.py similarity index 76% rename from keras_core/initializers/constant_initializers.py rename to keras/initializers/constant_initializers.py index 321e4e2e3..189195a2f 100644 --- a/keras_core/initializers/constant_initializers.py +++ b/keras/initializers/constant_initializers.py @@ -1,11 +1,11 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.backend import standardize_dtype -from keras_core.initializers.initializer import Initializer +from keras import ops +from keras.api_export import keras_export +from keras.backend import standardize_dtype +from keras.initializers.initializer import Initializer -@keras_core_export( - ["keras_core.initializers.Constant", "keras_core.initializers.constant"] +@keras_export( + ["keras.initializers.Constant", "keras.initializers.constant"] ) class Constant(Initializer): """Initializer that generates tensors with constant values. @@ -41,8 +41,8 @@ class Constant(Initializer): return {"value": self.value} -@keras_core_export( - ["keras_core.initializers.Zeros", "keras_core.initializers.zeros"] +@keras_export( + ["keras.initializers.Zeros", "keras.initializers.zeros"] ) class Zeros(Initializer): """Initializer that generates tensors initialized to 0. @@ -64,16 +64,16 @@ class Zeros(Initializer): Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes - are supported. If not specified, `keras_core.backend.floatx()` + are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it - otherwise (via `keras_core.backend.set_floatx(float_dtype)`). + otherwise (via `keras.backend.set_floatx(float_dtype)`). 
""" dtype = standardize_dtype(dtype) return ops.zeros(shape, dtype=dtype) -@keras_core_export( - ["keras_core.initializers.Ones", "keras_core.initializers.ones"] +@keras_export( + ["keras.initializers.Ones", "keras.initializers.ones"] ) class Ones(Initializer): """Initializer that generates tensors initialized to 1. @@ -97,19 +97,19 @@ class Ones(Initializer): Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes - are supported. If not specified, `keras_core.backend.floatx()` + are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it - otherwise (via `keras_core.backend.set_floatx(float_dtype)`). + otherwise (via `keras.backend.set_floatx(float_dtype)`). """ dtype = standardize_dtype(dtype) return ops.ones(shape, dtype=dtype) -@keras_core_export( +@keras_export( [ - "keras_core.initializers.IdentityInitializer", - "keras_core.initializers.Identity", - "keras_core.initializers.identity", + "keras.initializers.IdentityInitializer", + "keras.initializers.Identity", + "keras.initializers.identity", ] ) class Identity(Initializer): @@ -140,9 +140,9 @@ class Identity(Initializer): Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes - are supported. If not specified, `keras_core.backend.floatx()` + are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it - otherwise (via `keras_core.backend.set_floatx(float_dtype)`). + otherwise (via `keras.backend.set_floatx(float_dtype)`). 
""" if len(shape) != 2: raise ValueError( diff --git a/keras_core/initializers/constant_initializers_test.py b/keras/initializers/constant_initializers_test.py similarity index 94% rename from keras_core/initializers/constant_initializers_test.py rename to keras/initializers/constant_initializers_test.py index af0e8fe4f..8486d7f29 100644 --- a/keras_core/initializers/constant_initializers_test.py +++ b/keras/initializers/constant_initializers_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras_core import backend -from keras_core import initializers -from keras_core import testing +from keras import backend +from keras import initializers +from keras import testing class ConstantInitializersTest(testing.TestCase): diff --git a/keras_core/initializers/initializer.py b/keras/initializers/initializer.py similarity index 92% rename from keras_core/initializers/initializer.py rename to keras/initializers/initializer.py index 1d199738b..e28f269bc 100644 --- a/keras_core/initializers/initializer.py +++ b/keras/initializers/initializer.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export -@keras_core_export( - ["keras_core.Initializer", "keras_core.initializers.Initializer"] +@keras_export( + ["keras.Initializer", "keras.initializers.Initializer"] ) class Initializer: """Initializer base class: all Keras initializers inherit from this class. 
@@ -29,7 +29,7 @@ class Initializer: self.stddev = stddev def __call__(self, shape, dtype=None, **kwargs): - return keras_core.random.normal( + return keras.random.normal( shape, mean=self.mean, stddev=self.stddev, dtype=dtype ) diff --git a/keras_core/initializers/random_initializers.py b/keras/initializers/random_initializers.py similarity index 88% rename from keras_core/initializers/random_initializers.py rename to keras/initializers/random_initializers.py index 07ae23d38..ed1d0e433 100644 --- a/keras_core/initializers/random_initializers.py +++ b/keras/initializers/random_initializers.py @@ -1,16 +1,16 @@ import math -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.backend import random -from keras_core.initializers.initializer import Initializer -from keras_core.saving import serialization_lib +from keras import ops +from keras.api_export import keras_export +from keras.backend import random +from keras.initializers.initializer import Initializer +from keras.saving import serialization_lib -@keras_core_export( +@keras_export( [ - "keras_core.initializers.RandomNormal", - "keras_core.initializers.random_normal", + "keras.initializers.RandomNormal", + "keras.initializers.random_normal", ] ) class RandomNormal(Initializer): @@ -34,13 +34,13 @@ class RandomNormal(Initializer): stddev: A python scalar or a scalar keras tensor. Standard deviation of the random values to generate. seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. 
""" def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -64,10 +64,10 @@ class RandomNormal(Initializer): return {"mean": self.mean, "stddev": self.stddev, "seed": seed_config} -@keras_core_export( +@keras_export( [ - "keras_core.initializers.TruncatedNormal", - "keras_core.initializers.truncated_normal", + "keras.initializers.TruncatedNormal", + "keras.initializers.truncated_normal", ] ) class TruncatedNormal(Initializer): @@ -94,13 +94,13 @@ class TruncatedNormal(Initializer): stddev: A python scalar or a scalar keras tensor. Standard deviation of the random values to generate. seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -124,10 +124,10 @@ class TruncatedNormal(Initializer): return {"mean": self.mean, "stddev": self.stddev, "seed": seed_config} -@keras_core_export( +@keras_export( [ - "keras_core.initializers.RandomUniform", - "keras_core.initializers.random_uniform", + "keras.initializers.RandomUniform", + "keras.initializers.random_uniform", ] ) class RandomUniform(Initializer): @@ -151,13 +151,13 @@ class RandomUniform(Initializer): maxval: A python scalar or a scalar keras tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. 
To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. """ def __init__(self, minval=-0.05, maxval=0.05, seed=None): @@ -185,10 +185,10 @@ class RandomUniform(Initializer): } -@keras_core_export( +@keras_export( [ - "keras_core.initializers.VarianceScaling", - "keras_core.initializers.variance_scaling", + "keras.initializers.VarianceScaling", + "keras.initializers.variance_scaling", ] ) class VarianceScaling(Initializer): @@ -224,13 +224,13 @@ class VarianceScaling(Initializer): distribution: Random distribution to use. One of `"truncated_normal"`, `"untruncated_normal"`, or `"uniform"`. seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. """ def __init__( @@ -305,10 +305,10 @@ class VarianceScaling(Initializer): } -@keras_core_export( +@keras_export( [ - "keras_core.initializers.GlorotUniform", - "keras_core.initializers.glorot_uniform", + "keras.initializers.GlorotUniform", + "keras.initializers.glorot_uniform", ] ) class GlorotUniform(VarianceScaling): @@ -330,13 +330,13 @@ class GlorotUniform(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. 
Reference: @@ -354,10 +354,10 @@ class GlorotUniform(VarianceScaling): } -@keras_core_export( +@keras_export( [ - "keras_core.initializers.GlorotNormal", - "keras_core.initializers.glorot_normal", + "keras.initializers.GlorotNormal", + "keras.initializers.glorot_normal", ] ) class GlorotNormal(VarianceScaling): @@ -380,13 +380,13 @@ class GlorotNormal(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. Reference: @@ -407,10 +407,10 @@ class GlorotNormal(VarianceScaling): } -@keras_core_export( +@keras_export( [ - "keras_core.initializers.LecunNormal", - "keras_core.initializers.lecun_normal", + "keras.initializers.LecunNormal", + "keras.initializers.lecun_normal", ] ) class LecunNormal(VarianceScaling): @@ -436,13 +436,13 @@ class LecunNormal(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. 
Reference: @@ -460,10 +460,10 @@ class LecunNormal(VarianceScaling): } -@keras_core_export( +@keras_export( [ - "keras_core.initializers.LecunUniform", - "keras_core.initializers.lecun_uniform", + "keras.initializers.LecunUniform", + "keras.initializers.lecun_uniform", ] ) class LecunUniform(VarianceScaling): @@ -485,13 +485,13 @@ class LecunUniform(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. Reference: @@ -509,8 +509,8 @@ class LecunUniform(VarianceScaling): } -@keras_core_export( - ["keras_core.initializers.HeNormal", "keras_core.initializers.he_normal"] +@keras_export( + ["keras.initializers.HeNormal", "keras.initializers.he_normal"] ) class HeNormal(VarianceScaling): """He normal initializer. @@ -531,13 +531,13 @@ class HeNormal(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. Reference: @@ -555,8 +555,8 @@ class HeNormal(VarianceScaling): } -@keras_core_export( - ["keras_core.initializers.HeUniform", "keras_core.initializers.he_uniform"] +@keras_export( + ["keras.initializers.HeUniform", "keras.initializers.he_uniform"] ) class HeUniform(VarianceScaling): """He uniform variance scaling initializer. 
@@ -577,13 +577,13 @@ class HeUniform(VarianceScaling): Args: seed: A Python integer or instance of - `keras_core.backend.SeedGenerator`. + `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.backend.SeedGenerator`. + of `keras.backend.SeedGenerator`. Reference: @@ -629,11 +629,11 @@ def compute_fans(shape): return int(fan_in), int(fan_out) -@keras_core_export( +@keras_export( [ - "keras_core.initializers.OrthogonalInitializer", - "keras_core.initializers.Orthogonal", - "keras_core.initializers.orthogonal", + "keras.initializers.OrthogonalInitializer", + "keras.initializers.Orthogonal", + "keras.initializers.orthogonal", ] ) class OrthogonalInitializer(Initializer): @@ -653,12 +653,12 @@ class OrthogonalInitializer(Initializer): Examples: >>> # Standalone usage: - >>> initializer = keras_core.initializers.Orthogonal() + >>> initializer = keras.initializers.Orthogonal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: - >>> initializer = keras_core.initializers.Orthogonal() - >>> layer = keras_core.layers.Dense(3, kernel_initializer=initializer) + >>> initializer = keras.initializers.Orthogonal() + >>> layer = keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the orthogonal matrix. 
diff --git a/keras_core/initializers/random_initializers_test.py b/keras/initializers/random_initializers_test.py similarity index 97% rename from keras_core/initializers/random_initializers_test.py rename to keras/initializers/random_initializers_test.py index 4dc769939..ef7909cf1 100644 --- a/keras_core/initializers/random_initializers_test.py +++ b/keras/initializers/random_initializers_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import backend -from keras_core import initializers -from keras_core import testing -from keras_core import utils +from keras import backend +from keras import initializers +from keras import testing +from keras import utils class InitializersTest(testing.TestCase): diff --git a/keras/layers/__init__.py b/keras/layers/__init__.py new file mode 100644 index 000000000..a7c72eb05 --- /dev/null +++ b/keras/layers/__init__.py @@ -0,0 +1,170 @@ +from keras.api_export import keras_export +from keras.layers.activations.activation import Activation +from keras.layers.activations.elu import ELU +from keras.layers.activations.leaky_relu import LeakyReLU +from keras.layers.activations.prelu import PReLU +from keras.layers.activations.relu import ReLU +from keras.layers.activations.softmax import Softmax +from keras.layers.attention.additive_attention import AdditiveAttention +from keras.layers.attention.attention import Attention +from keras.layers.attention.multi_head_attention import MultiHeadAttention +from keras.layers.convolutional.conv1d import Conv1D +from keras.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.layers.convolutional.conv2d import Conv2D +from keras.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.layers.convolutional.conv3d import Conv3D +from keras.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from 
keras.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.layers.core.dense import Dense +from keras.layers.core.einsum_dense import EinsumDense +from keras.layers.core.embedding import Embedding +from keras.layers.core.identity import Identity +from keras.layers.core.input_layer import Input +from keras.layers.core.input_layer import InputLayer +from keras.layers.core.lambda_layer import Lambda +from keras.layers.core.masking import Masking +from keras.layers.core.wrapper import Wrapper +from keras.layers.layer import Layer +from keras.layers.merging.add import Add +from keras.layers.merging.add import add +from keras.layers.merging.average import Average +from keras.layers.merging.average import average +from keras.layers.merging.concatenate import Concatenate +from keras.layers.merging.concatenate import concatenate +from keras.layers.merging.dot import Dot +from keras.layers.merging.dot import dot +from keras.layers.merging.maximum import Maximum +from keras.layers.merging.maximum import maximum +from keras.layers.merging.minimum import Minimum +from keras.layers.merging.minimum import minimum +from keras.layers.merging.multiply import Multiply +from keras.layers.merging.multiply import multiply +from keras.layers.merging.subtract import Subtract +from keras.layers.merging.subtract import subtract +from keras.layers.normalization.batch_normalization import ( + BatchNormalization, +) +from keras.layers.normalization.group_normalization import ( + GroupNormalization, +) +from keras.layers.normalization.layer_normalization import ( + LayerNormalization, +) +from keras.layers.normalization.spectral_normalization import ( + SpectralNormalization, +) +from keras.layers.normalization.unit_normalization import UnitNormalization +from keras.layers.pooling.average_pooling1d import AveragePooling1D +from keras.layers.pooling.average_pooling2d import AveragePooling2D +from 
keras.layers.pooling.average_pooling3d import AveragePooling3D +from keras.layers.pooling.global_average_pooling1d import ( + GlobalAveragePooling1D, +) +from keras.layers.pooling.global_average_pooling2d import ( + GlobalAveragePooling2D, +) +from keras.layers.pooling.global_average_pooling3d import ( + GlobalAveragePooling3D, +) +from keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.layers.pooling.max_pooling1d import MaxPooling1D +from keras.layers.pooling.max_pooling2d import MaxPooling2D +from keras.layers.pooling.max_pooling3d import MaxPooling3D +from keras.layers.preprocessing.category_encoding import CategoryEncoding +from keras.layers.preprocessing.center_crop import CenterCrop +from keras.layers.preprocessing.discretization import Discretization +from keras.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.layers.preprocessing.hashing import Hashing +from keras.layers.preprocessing.index_lookup import IndexLookup +from keras.layers.preprocessing.integer_lookup import IntegerLookup +from keras.layers.preprocessing.normalization import Normalization +from keras.layers.preprocessing.random_brightness import RandomBrightness +from keras.layers.preprocessing.random_contrast import RandomContrast +from keras.layers.preprocessing.random_crop import RandomCrop +from keras.layers.preprocessing.random_flip import RandomFlip +from keras.layers.preprocessing.random_rotation import RandomRotation +from keras.layers.preprocessing.random_translation import RandomTranslation +from keras.layers.preprocessing.random_zoom import RandomZoom +from keras.layers.preprocessing.rescaling import Rescaling +from keras.layers.preprocessing.resizing import Resizing +from keras.layers.preprocessing.string_lookup import StringLookup +from keras.layers.preprocessing.text_vectorization 
import TextVectorization +from keras.layers.regularization.activity_regularization import ( + ActivityRegularization, +) +from keras.layers.regularization.dropout import Dropout +from keras.layers.regularization.gaussian_dropout import GaussianDropout +from keras.layers.regularization.gaussian_noise import GaussianNoise +from keras.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.layers.reshaping.cropping1d import Cropping1D +from keras.layers.reshaping.cropping2d import Cropping2D +from keras.layers.reshaping.cropping3d import Cropping3D +from keras.layers.reshaping.flatten import Flatten +from keras.layers.reshaping.permute import Permute +from keras.layers.reshaping.repeat_vector import RepeatVector +from keras.layers.reshaping.reshape import Reshape +from keras.layers.reshaping.up_sampling1d import UpSampling1D +from keras.layers.reshaping.up_sampling2d import UpSampling2D +from keras.layers.reshaping.up_sampling3d import UpSampling3D +from keras.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.layers.rnn.bidirectional import Bidirectional +from keras.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.layers.rnn.gru import GRU +from keras.layers.rnn.gru import GRUCell +from keras.layers.rnn.lstm import LSTM +from keras.layers.rnn.lstm import LSTMCell +from keras.layers.rnn.rnn import RNN +from keras.layers.rnn.simple_rnn import SimpleRNN +from keras.layers.rnn.simple_rnn import SimpleRNNCell +from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.layers.rnn.time_distributed import TimeDistributed +from keras.saving import 
serialization_lib + + +@keras_export("keras.layers.serialize") +def serialize(layer): + """Returns the layer configuration as a Python dict. + + Args: + layer: A `keras.layers.Layer` instance to serialize. + + Returns: + Python dict which contains the configuration of the layer. + """ + return serialization_lib.serialize_keras_object(layer) + + +@keras_export("keras.layers.deserialize") +def deserialize(config, custom_objects=None): + """Returns a Keras layer object via its configuration. + + Args: + config: A python dict containing a serialized layer configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + objects (classes and functions) to be considered during + deserialization. + + Returns: + A Keras layer instance. + """ + obj = serialization_lib.deserialize_keras_object( + config, + custom_objects=custom_objects, + ) + if not isinstance(obj, Layer): + raise ValueError( + "`keras.layers.deserialize` was passed a `config` object that is " + f"not a `keras.layers.Layer`. 
Received: {config}" + ) + return obj diff --git a/keras/layers/activations/__init__.py b/keras/layers/activations/__init__.py new file mode 100644 index 000000000..176e43fe1 --- /dev/null +++ b/keras/layers/activations/__init__.py @@ -0,0 +1,5 @@ +from keras.layers.activations.elu import ELU +from keras.layers.activations.leaky_relu import LeakyReLU +from keras.layers.activations.prelu import PReLU +from keras.layers.activations.relu import ReLU +from keras.layers.activations.softmax import Softmax diff --git a/keras_core/layers/activations/activation.py b/keras/layers/activations/activation.py similarity index 70% rename from keras_core/layers/activations/activation.py rename to keras/layers/activations/activation.py index 523242897..438bd3f28 100644 --- a/keras_core/layers/activations/activation.py +++ b/keras/layers/activations/activation.py @@ -1,23 +1,23 @@ -from keras_core import activations -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Activation") +@keras_export("keras.layers.Activation") class Activation(Layer): """Applies an activation function to an output. Args: activation: Activation function. It could be a callable, or the name of - an activation from the `keras_core.activations` namespace. + an activation from the `keras.activations` namespace. **kwargs: Base layer keyword arguments, such as `name` and `dtype`. 
Example: - >>> layer = keras_core.layers.Activation('relu') + >>> layer = keras.layers.Activation('relu') >>> layer([-3.0, -1.0, 0.0, 2.0]) [0.0, 0.0, 0.0, 2.0] - >>> layer = keras_core.layers.Activation(keras_core.activations.relu) + >>> layer = keras.layers.Activation(keras.activations.relu) >>> layer([-3.0, -1.0, 0.0, 2.0]) [0.0, 0.0, 0.0, 2.0] """ diff --git a/keras_core/layers/activations/activation_test.py b/keras/layers/activations/activation_test.py similarity index 91% rename from keras_core/layers/activations/activation_test.py rename to keras/layers/activations/activation_test.py index b03ffce07..355f0602c 100644 --- a/keras_core/layers/activations/activation_test.py +++ b/keras/layers/activations/activation_test.py @@ -1,8 +1,8 @@ import pytest -from keras_core import activations -from keras_core import layers -from keras_core import testing +from keras import activations +from keras import layers +from keras import testing class ActivationTest(testing.TestCase): diff --git a/keras_core/layers/activations/elu.py b/keras/layers/activations/elu.py similarity index 79% rename from keras_core/layers/activations/elu.py rename to keras/layers/activations/elu.py index 534a07d9b..ed8abdf33 100644 --- a/keras_core/layers/activations/elu.py +++ b/keras/layers/activations/elu.py @@ -1,9 +1,9 @@ -from keras_core import activations -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.ELU") +@keras_export("keras.layers.ELU") class ELU(Layer): """Applies an Exponential Linear Unit function to an output. 
diff --git a/keras_core/layers/activations/elu_test.py b/keras/layers/activations/elu_test.py similarity index 90% rename from keras_core/layers/activations/elu_test.py rename to keras/layers/activations/elu_test.py index 77c13ac4f..1af54ffd4 100644 --- a/keras_core/layers/activations/elu_test.py +++ b/keras/layers/activations/elu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import testing -from keras_core.layers.activations import elu +from keras import testing +from keras.layers.activations import elu class ELUTest(testing.TestCase): diff --git a/keras_core/layers/activations/leaky_relu.py b/keras/layers/activations/leaky_relu.py similarity index 90% rename from keras_core/layers/activations/leaky_relu.py rename to keras/layers/activations/leaky_relu.py index c81c264bf..fae75abaa 100644 --- a/keras_core/layers/activations/leaky_relu.py +++ b/keras/layers/activations/leaky_relu.py @@ -1,11 +1,11 @@ import warnings -from keras_core import activations -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.LeakyReLU") +@keras_export("keras.layers.LeakyReLU") class LeakyReLU(Layer): """Leaky version of a Rectified Linear Unit activation layer. 
diff --git a/keras_core/layers/activations/leaky_relu_test.py b/keras/layers/activations/leaky_relu_test.py similarity index 92% rename from keras_core/layers/activations/leaky_relu_test.py rename to keras/layers/activations/leaky_relu_test.py index 18e24033e..8665ce119 100644 --- a/keras_core/layers/activations/leaky_relu_test.py +++ b/keras/layers/activations/leaky_relu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import testing -from keras_core.layers.activations import leaky_relu +from keras import testing +from keras.layers.activations import leaky_relu class LeakyReLUTest(testing.TestCase): diff --git a/keras_core/layers/activations/prelu.py b/keras/layers/activations/prelu.py similarity index 90% rename from keras_core/layers/activations/prelu.py rename to keras/layers/activations/prelu.py index ca847ed56..4f2a0a2e7 100644 --- a/keras_core/layers/activations/prelu.py +++ b/keras/layers/activations/prelu.py @@ -1,13 +1,13 @@ -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import activations +from keras import constraints +from keras import initializers +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.PReLU") +@keras_export("keras.layers.PReLU") class PReLU(Layer): """Parametric Rectified Linear Unit activation layer. 
diff --git a/keras_core/layers/activations/prelu_test.py b/keras/layers/activations/prelu_test.py similarity index 93% rename from keras_core/layers/activations/prelu_test.py rename to keras/layers/activations/prelu_test.py index 73666c774..82785de10 100644 --- a/keras_core/layers/activations/prelu_test.py +++ b/keras/layers/activations/prelu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import testing -from keras_core.layers.activations import prelu +from keras import testing +from keras.layers.activations import prelu class PReLUTest(testing.TestCase): diff --git a/keras_core/layers/activations/relu.py b/keras/layers/activations/relu.py similarity index 91% rename from keras_core/layers/activations/relu.py rename to keras/layers/activations/relu.py index 5b538156a..e7efa03d7 100644 --- a/keras_core/layers/activations/relu.py +++ b/keras/layers/activations/relu.py @@ -1,9 +1,9 @@ -from keras_core import activations -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.ReLU") +@keras_export("keras.layers.ReLU") class ReLU(Layer): """Rectified Linear Unit activation function layer. 
@@ -17,7 +17,7 @@ class ReLU(Layer): Example: ``` python - relu_layer = keras_core.layers.activations.ReLU( + relu_layer = keras.layers.activations.ReLU( max_value=10, negative_slope=0.5, threshold=0, diff --git a/keras_core/layers/activations/relu_test.py b/keras/layers/activations/relu_test.py similarity index 97% rename from keras_core/layers/activations/relu_test.py rename to keras/layers/activations/relu_test.py index c00c2d301..c423f76a3 100644 --- a/keras_core/layers/activations/relu_test.py +++ b/keras/layers/activations/relu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import testing -from keras_core.layers.activations import relu +from keras import testing +from keras.layers.activations import relu class ReLUTest(testing.TestCase): diff --git a/keras_core/layers/activations/softmax.py b/keras/layers/activations/softmax.py similarity index 88% rename from keras_core/layers/activations/softmax.py rename to keras/layers/activations/softmax.py index 018a0f52e..e29f2ab7c 100644 --- a/keras_core/layers/activations/softmax.py +++ b/keras/layers/activations/softmax.py @@ -1,7 +1,7 @@ -from keras_core import activations -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer def _large_negative_number(dtype): @@ -11,7 +11,7 @@ def _large_negative_number(dtype): return -1e9 -@keras_core_export("keras_core.layers.Softmax") +@keras_export("keras.layers.Softmax") class Softmax(Layer): """Softmax activation layer. 
@@ -22,7 +22,7 @@ class Softmax(Layer): ``` Example: - >>>softmax_layer = keras_core.layers.activations.Softmax() + >>>softmax_layer = keras.layers.activations.Softmax() >>>input = np.array([1.0, 2.0, 1.0]) >>>result = softmax_layer(input) [0.21194157, 0.5761169, 0.21194157] diff --git a/keras_core/layers/activations/softmax_test.py b/keras/layers/activations/softmax_test.py similarity index 95% rename from keras_core/layers/activations/softmax_test.py rename to keras/layers/activations/softmax_test.py index 466538827..fb3484799 100644 --- a/keras_core/layers/activations/softmax_test.py +++ b/keras/layers/activations/softmax_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import testing -from keras_core.layers.activations import softmax +from keras import testing +from keras.layers.activations import softmax class SoftmaxTest(testing.TestCase): diff --git a/keras_core/layers/attention/__init__.py b/keras/layers/attention/__init__.py similarity index 100% rename from keras_core/layers/attention/__init__.py rename to keras/layers/attention/__init__.py diff --git a/keras_core/layers/attention/additive_attention.py b/keras/layers/attention/additive_attention.py similarity index 95% rename from keras_core/layers/attention/additive_attention.py rename to keras/layers/attention/additive_attention.py index 4f7b104d2..f0103673d 100644 --- a/keras_core/layers/attention/additive_attention.py +++ b/keras/layers/attention/additive_attention.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.attention.attention import Attention +from keras import ops +from keras.api_export import keras_export +from keras.layers.attention.attention import Attention -@keras_core_export("keras_core.layers.AdditiveAttention") +@keras_export("keras.layers.AdditiveAttention") class AdditiveAttention(Attention): """Additive attention layer, a.k.a. Bahdanau-style attention. 
diff --git a/keras_core/layers/attention/additive_attention_test.py b/keras/layers/attention/additive_attention_test.py similarity index 97% rename from keras_core/layers/attention/additive_attention_test.py rename to keras/layers/attention/additive_attention_test.py index 19ca4a2fc..5aa7e7f2e 100644 --- a/keras_core/layers/attention/additive_attention_test.py +++ b/keras/layers/attention/additive_attention_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class AdditiveAttentionTest(testing.TestCase): diff --git a/keras_core/layers/attention/attention.py b/keras/layers/attention/attention.py similarity index 98% rename from keras_core/layers/attention/attention.py rename to keras/layers/attention/attention.py index 1bd4cbb4f..30027a3ff 100644 --- a/keras_core/layers/attention/attention.py +++ b/keras/layers/attention/attention.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Attention") +@keras_export("keras.layers.Attention") class Attention(Layer): """Dot-product attention layer, a.k.a. Luong-style attention. 
diff --git a/keras_core/layers/attention/attention_test.py b/keras/layers/attention/attention_test.py similarity index 98% rename from keras_core/layers/attention/attention_test.py rename to keras/layers/attention/attention_test.py index 1009199e9..9f27259e6 100644 --- a/keras_core/layers/attention/attention_test.py +++ b/keras/layers/attention/attention_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class AttentionTest(testing.TestCase): diff --git a/keras_core/layers/attention/multi_head_attention.py b/keras/layers/attention/multi_head_attention.py similarity index 98% rename from keras_core/layers/attention/multi_head_attention.py rename to keras/layers/attention/multi_head_attention.py index 36dd1e15c..24e3bcc02 100644 --- a/keras_core/layers/attention/multi_head_attention.py +++ b/keras/layers/attention/multi_head_attention.py @@ -4,18 +4,18 @@ import string import numpy as np -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.activations.softmax import Softmax -from keras_core.layers.core.einsum_dense import EinsumDense -from keras_core.layers.layer import Layer -from keras_core.layers.regularization.dropout import Dropout +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.activations.softmax import Softmax +from keras.layers.core.einsum_dense import EinsumDense +from keras.layers.layer import Layer +from keras.layers.regularization.dropout import Dropout -@keras_core_export("keras_core.layers.MultiHeadAttention") +@keras_export("keras.layers.MultiHeadAttention") class MultiHeadAttention(Layer): """MultiHeadAttention layer. 
diff --git a/keras_core/layers/attention/multi_head_attention_test.py b/keras/layers/attention/multi_head_attention_test.py similarity index 98% rename from keras_core/layers/attention/multi_head_attention_test.py rename to keras/layers/attention/multi_head_attention_test.py index 61a76788d..088e49d6c 100644 --- a/keras_core/layers/attention/multi_head_attention_test.py +++ b/keras/layers/attention/multi_head_attention_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import initializers +from keras import layers +from keras import testing class MultiHeadAttentionTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/convolutional/__init__.py b/keras/layers/convolutional/__init__.py similarity index 100% rename from keras_core/layers/convolutional/__init__.py rename to keras/layers/convolutional/__init__.py diff --git a/keras_core/layers/convolutional/base_conv.py b/keras/layers/convolutional/base_conv.py similarity index 95% rename from keras_core/layers/convolutional/base_conv.py rename to keras/layers/convolutional/base_conv.py index af014d33f..c55850138 100644 --- a/keras_core/layers/convolutional/base_conv.py +++ b/keras/layers/convolutional/base_conv.py @@ -1,16 +1,16 @@ """Keras base class for convolution layers.""" -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.backend import standardize_data_format -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.ops.operation_utils import compute_conv_output_shape -from keras_core.utils.argument_validation import standardize_padding -from keras_core.utils.argument_validation 
import standardize_tuple +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.backend import standardize_data_format +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.ops.operation_utils import compute_conv_output_shape +from keras.utils.argument_validation import standardize_padding +from keras.utils.argument_validation import standardize_tuple class BaseConv(Layer): diff --git a/keras_core/layers/convolutional/base_conv_transpose.py b/keras/layers/convolutional/base_conv_transpose.py similarity index 95% rename from keras_core/layers/convolutional/base_conv_transpose.py rename to keras/layers/convolutional/base_conv_transpose.py index 1b41e4f8a..ec5ab1e23 100644 --- a/keras_core/layers/convolutional/base_conv_transpose.py +++ b/keras/layers/convolutional/base_conv_transpose.py @@ -1,18 +1,18 @@ """Keras base class for transpose convolution layers.""" -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.backend import standardize_data_format -from keras_core.backend.common.backend_utils import ( +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.backend import standardize_data_format +from keras.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils.argument_validation import standardize_padding -from keras_core.utils.argument_validation import standardize_tuple +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils.argument_validation import standardize_padding +from 
keras.utils.argument_validation import standardize_tuple class BaseConvTranspose(Layer): diff --git a/keras_core/layers/convolutional/base_depthwise_conv.py b/keras/layers/convolutional/base_depthwise_conv.py similarity index 95% rename from keras_core/layers/convolutional/base_depthwise_conv.py rename to keras/layers/convolutional/base_depthwise_conv.py index 037a9fcd4..0bd00308b 100644 --- a/keras_core/layers/convolutional/base_depthwise_conv.py +++ b/keras/layers/convolutional/base_depthwise_conv.py @@ -1,16 +1,16 @@ """Keras base class for depthwise convolution layers.""" -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.backend import standardize_data_format -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.ops.operation_utils import compute_conv_output_shape -from keras_core.utils.argument_validation import standardize_padding -from keras_core.utils.argument_validation import standardize_tuple +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.backend import standardize_data_format +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.ops.operation_utils import compute_conv_output_shape +from keras.utils.argument_validation import standardize_padding +from keras.utils.argument_validation import standardize_tuple class BaseDepthwiseConv(Layer): diff --git a/keras_core/layers/convolutional/base_separable_conv.py b/keras/layers/convolutional/base_separable_conv.py similarity index 95% rename from keras_core/layers/convolutional/base_separable_conv.py rename to keras/layers/convolutional/base_separable_conv.py index 39aad853d..0a60cd61a 100644 --- a/keras_core/layers/convolutional/base_separable_conv.py +++ 
b/keras/layers/convolutional/base_separable_conv.py @@ -1,17 +1,17 @@ """Keras abstract base layer for separable convolution.""" -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.backend import standardize_data_format -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.ops.operation_utils import compute_conv_output_shape -from keras_core.utils.argument_validation import standardize_padding -from keras_core.utils.argument_validation import standardize_tuple +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.backend import standardize_data_format +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.ops.operation_utils import compute_conv_output_shape +from keras.utils.argument_validation import standardize_padding +from keras.utils.argument_validation import standardize_tuple class BaseSeparableConv(Layer): diff --git a/keras_core/layers/convolutional/conv1d.py b/keras/layers/convolutional/conv1d.py similarity index 95% rename from keras_core/layers/convolutional/conv1d.py rename to keras/layers/convolutional/conv1d.py index 3c46deea0..4caa29514 100644 --- a/keras_core/layers/convolutional/conv1d.py +++ b/keras/layers/convolutional/conv1d.py @@ -1,10 +1,10 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_conv import BaseConv +from keras import ops +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv import BaseConv -@keras_core_export( - ["keras_core.layers.Conv1D", "keras_core.layers.Convolution1D"] +@keras_export( + ["keras.layers.Conv1D", "keras.layers.Convolution1D"] ) class Conv1D(BaseConv): """1D 
convolution layer (e.g. temporal convolution). @@ -87,7 +87,7 @@ class Conv1D(BaseConv): >>> # The inputs are 128-length vectors with 10 timesteps, and the >>> # batch size is 4. >>> x = np.random.rand(4, 10, 128) - >>> y = keras_core.layers.Conv1D(32, 3, activation='relu')(x) + >>> y = keras.layers.Conv1D(32, 3, activation='relu')(x) >>> print(y.shape) (4, 8, 32) """ diff --git a/keras_core/layers/convolutional/conv1d_transpose.py b/keras/layers/convolutional/conv1d_transpose.py similarity index 94% rename from keras_core/layers/convolutional/conv1d_transpose.py rename to keras/layers/convolutional/conv1d_transpose.py index d0cc58626..3d57c261c 100644 --- a/keras_core/layers/convolutional/conv1d_transpose.py +++ b/keras/layers/convolutional/conv1d_transpose.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_conv_transpose import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv_transpose import ( BaseConvTranspose, ) -@keras_core_export( +@keras_export( [ - "keras_core.layers.Conv1DTranspose", - "keras_core.layers.Convolution1DTranspose", + "keras.layers.Conv1DTranspose", + "keras.layers.Convolution1DTranspose", ] ) class Conv1DTranspose(BaseConvTranspose): @@ -86,7 +86,7 @@ class Conv1DTranspose(BaseConvTranspose): Examples: >>> x = np.random.rand(4, 10, 128) - >>> y = keras_core.layers.Conv1DTranspose(32, 3, 2, activation='relu')(x) + >>> y = keras.layers.Conv1DTranspose(32, 3, 2, activation='relu')(x) >>> print(y.shape) (4, 21, 32) """ diff --git a/keras_core/layers/convolutional/conv2d.py b/keras/layers/convolutional/conv2d.py similarity index 95% rename from keras_core/layers/convolutional/conv2d.py rename to keras/layers/convolutional/conv2d.py index 40706d329..b6c644fde 100644 --- a/keras_core/layers/convolutional/conv2d.py +++ b/keras/layers/convolutional/conv2d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from 
keras_core.layers.convolutional.base_conv import BaseConv +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv import BaseConv -@keras_core_export( - ["keras_core.layers.Conv2D", "keras_core.layers.Convolution2D"] +@keras_export( + ["keras.layers.Conv2D", "keras.layers.Convolution2D"] ) class Conv2D(BaseConv): """2D convolution layer. @@ -81,7 +81,7 @@ class Conv2D(BaseConv): Examples: >>> x = np.random.rand(4, 10, 10, 128) - >>> y = keras_core.layers.Conv2D(32, 3, activation='relu')(x) + >>> y = keras.layers.Conv2D(32, 3, activation='relu')(x) >>> print(y.shape) (4, 8, 8, 32) """ diff --git a/keras_core/layers/convolutional/conv2d_transpose.py b/keras/layers/convolutional/conv2d_transpose.py similarity index 94% rename from keras_core/layers/convolutional/conv2d_transpose.py rename to keras/layers/convolutional/conv2d_transpose.py index 35ed112aa..c0c435c38 100644 --- a/keras_core/layers/convolutional/conv2d_transpose.py +++ b/keras/layers/convolutional/conv2d_transpose.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_conv_transpose import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv_transpose import ( BaseConvTranspose, ) -@keras_core_export( +@keras_export( [ - "keras_core.layers.Conv2DTranspose", - "keras_core.layers.Convolution2DTranspose", + "keras.layers.Conv2DTranspose", + "keras.layers.Convolution2DTranspose", ] ) class Conv2DTranspose(BaseConvTranspose): @@ -88,7 +88,7 @@ class Conv2DTranspose(BaseConvTranspose): Examples: >>> x = np.random.rand(4, 10, 8, 128) - >>> y = keras_core.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x) + >>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x) >>> print(y.shape) (4, 20, 16, 32) """ diff --git a/keras_core/layers/convolutional/conv3d.py b/keras/layers/convolutional/conv3d.py similarity index 95% rename from keras_core/layers/convolutional/conv3d.py 
rename to keras/layers/convolutional/conv3d.py index 801b1faed..8cb189fbb 100644 --- a/keras_core/layers/convolutional/conv3d.py +++ b/keras/layers/convolutional/conv3d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_conv import BaseConv +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv import BaseConv -@keras_core_export( - ["keras_core.layers.Conv3D", "keras_core.layers.Convolution3D"] +@keras_export( + ["keras.layers.Conv3D", "keras.layers.Convolution3D"] ) class Conv3D(BaseConv): """3D convolution layer. @@ -87,7 +87,7 @@ class Conv3D(BaseConv): Examples: >>> x = np.random.rand(4, 10, 10, 10, 128) - >>> y = keras_core.layers.Conv3D(32, 3, activation='relu')(x) + >>> y = keras.layers.Conv3D(32, 3, activation='relu')(x) >>> print(y.shape) (4, 8, 8, 8, 32) """ diff --git a/keras_core/layers/convolutional/conv3d_transpose.py b/keras/layers/convolutional/conv3d_transpose.py similarity index 94% rename from keras_core/layers/convolutional/conv3d_transpose.py rename to keras/layers/convolutional/conv3d_transpose.py index e418a62b1..9d7320d97 100644 --- a/keras_core/layers/convolutional/conv3d_transpose.py +++ b/keras/layers/convolutional/conv3d_transpose.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_conv_transpose import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_conv_transpose import ( BaseConvTranspose, ) -@keras_core_export( +@keras_export( [ - "keras_core.layers.Conv3DTranspose", - "keras_core.layers.Convolution3DTranspose", + "keras.layers.Conv3DTranspose", + "keras.layers.Convolution3DTranspose", ] ) class Conv3DTranspose(BaseConvTranspose): @@ -93,7 +93,7 @@ class Conv3DTranspose(BaseConvTranspose): Examples: >>> x = np.random.rand(4, 10, 8, 12, 128) - >>> y = keras_core.layers.Conv3DTranspose(32, 2, 2, activation='relu')(x) + >>> y = 
keras.layers.Conv3DTranspose(32, 2, 2, activation='relu')(x) >>> print(y.shape) (4, 20, 16, 24, 32) """ diff --git a/keras_core/layers/convolutional/conv_test.py b/keras/layers/convolutional/conv_test.py similarity index 99% rename from keras_core/layers/convolutional/conv_test.py rename to keras/layers/convolutional/conv_test.py index 57c092f2f..67ba91d3b 100644 --- a/keras_core/layers/convolutional/conv_test.py +++ b/keras/layers/convolutional/conv_test.py @@ -3,8 +3,8 @@ import pytest from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing def _same_padding(input_size, kernel_size, stride): diff --git a/keras_core/layers/convolutional/conv_transpose_test.py b/keras/layers/convolutional/conv_transpose_test.py similarity index 98% rename from keras_core/layers/convolutional/conv_transpose_test.py rename to keras/layers/convolutional/conv_transpose_test.py index 831390bfd..62ac23219 100644 --- a/keras_core/layers/convolutional/conv_transpose_test.py +++ b/keras/layers/convolutional/conv_transpose_test.py @@ -2,16 +2,16 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing -from keras_core.backend.common.backend_utils import ( +from keras import backend +from keras import layers +from keras import testing +from keras.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_torch, ) -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras_core.backend.common.backend_utils import ( +from keras.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) @@ -787,7 +787,7 @@ class ConvTransposeCorrectnessTest(testing.TestCase, parameterized.TestCase): 
dilation_rate=1, ) - # keras-core layer + # keras layer kc_layer = layers.Conv1DTranspose( filters=1, kernel_size=kernel_size, diff --git a/keras_core/layers/convolutional/depthwise_conv1d.py b/keras/layers/convolutional/depthwise_conv1d.py similarity index 95% rename from keras_core/layers/convolutional/depthwise_conv1d.py rename to keras/layers/convolutional/depthwise_conv1d.py index 766bc1962..4bf0344ef 100644 --- a/keras_core/layers/convolutional/depthwise_conv1d.py +++ b/keras/layers/convolutional/depthwise_conv1d.py @@ -1,10 +1,10 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_depthwise_conv import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_depthwise_conv import ( BaseDepthwiseConv, ) -@keras_core_export("keras_core.layers.DepthwiseConv1D") +@keras_export("keras.layers.DepthwiseConv1D") class DepthwiseConv1D(BaseDepthwiseConv): """1D depthwise convolution layer. @@ -92,7 +92,7 @@ class DepthwiseConv1D(BaseDepthwiseConv): Examples: >>> x = np.random.rand(4, 10, 12) - >>> y = keras_core.layers.DepthwiseConv1D(3, 3, 2, activation='relu')(x) + >>> y = keras.layers.DepthwiseConv1D(3, 3, 2, activation='relu')(x) >>> print(y.shape) (4, 4, 36) """ diff --git a/keras_core/layers/convolutional/depthwise_conv2d.py b/keras/layers/convolutional/depthwise_conv2d.py similarity index 95% rename from keras_core/layers/convolutional/depthwise_conv2d.py rename to keras/layers/convolutional/depthwise_conv2d.py index aba65d466..fd6db4e65 100644 --- a/keras_core/layers/convolutional/depthwise_conv2d.py +++ b/keras/layers/convolutional/depthwise_conv2d.py @@ -1,10 +1,10 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_depthwise_conv import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_depthwise_conv import ( BaseDepthwiseConv, ) -@keras_core_export("keras_core.layers.DepthwiseConv2D") 
+@keras_export("keras.layers.DepthwiseConv2D") class DepthwiseConv2D(BaseDepthwiseConv): """2D depthwise convolution layer. @@ -92,7 +92,7 @@ class DepthwiseConv2D(BaseDepthwiseConv): Examples: >>> x = np.random.rand(4, 10, 10, 12) - >>> y = keras_core.layers.DepthwiseConv2D(3, 3, activation='relu')(x) + >>> y = keras.layers.DepthwiseConv2D(3, 3, activation='relu')(x) >>> print(y.shape) (4, 8, 8, 36) """ diff --git a/keras_core/layers/convolutional/depthwise_conv_test.py b/keras/layers/convolutional/depthwise_conv_test.py similarity index 99% rename from keras_core/layers/convolutional/depthwise_conv_test.py rename to keras/layers/convolutional/depthwise_conv_test.py index 27d00a65d..3c1708c13 100644 --- a/keras_core/layers/convolutional/depthwise_conv_test.py +++ b/keras/layers/convolutional/depthwise_conv_test.py @@ -3,8 +3,8 @@ import pytest from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing def _same_padding(input_size, kernel_size, stride): diff --git a/keras_core/layers/convolutional/separable_conv1d.py b/keras/layers/convolutional/separable_conv1d.py similarity index 95% rename from keras_core/layers/convolutional/separable_conv1d.py rename to keras/layers/convolutional/separable_conv1d.py index 51d47dc24..56dd8fc1d 100644 --- a/keras_core/layers/convolutional/separable_conv1d.py +++ b/keras/layers/convolutional/separable_conv1d.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_separable_conv import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_separable_conv import ( BaseSeparableConv, ) -@keras_core_export( +@keras_export( [ - "keras_core.layers.SeparableConv1D", - "keras_core.layers.SeparableConvolution1D", + "keras.layers.SeparableConv1D", + "keras.layers.SeparableConvolution1D", ] ) class 
SeparableConv1D(BaseSeparableConv): @@ -90,7 +90,7 @@ class SeparableConv1D(BaseSeparableConv): Examples: >>> x = np.random.rand(4, 10, 12) - >>> y = keras_core.layers.SeparableConv1D(3, 4, 3, 2, activation='relu')(x) + >>> y = keras.layers.SeparableConv1D(3, 4, 3, 2, activation='relu')(x) >>> print(y.shape) (4, 4, 4) """ diff --git a/keras_core/layers/convolutional/separable_conv2d.py b/keras/layers/convolutional/separable_conv2d.py similarity index 95% rename from keras_core/layers/convolutional/separable_conv2d.py rename to keras/layers/convolutional/separable_conv2d.py index 5a9d48324..468f188d4 100644 --- a/keras_core/layers/convolutional/separable_conv2d.py +++ b/keras/layers/convolutional/separable_conv2d.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.convolutional.base_separable_conv import ( +from keras.api_export import keras_export +from keras.layers.convolutional.base_separable_conv import ( BaseSeparableConv, ) -@keras_core_export( +@keras_export( [ - "keras_core.layers.SeparableConv2D", - "keras_core.layers.SeparableConvolution2D", + "keras.layers.SeparableConv2D", + "keras.layers.SeparableConvolution2D", ] ) class SeparableConv2D(BaseSeparableConv): @@ -90,7 +90,7 @@ class SeparableConv2D(BaseSeparableConv): Examples: >>> x = np.random.rand(4, 10, 10, 12) - >>> y = keras_core.layers.SeparableConv2D(3, 4, 3, 2, activation='relu')(x) + >>> y = keras.layers.SeparableConv2D(3, 4, 3, 2, activation='relu')(x) >>> print(y.shape) (4, 4, 4, 4) """ diff --git a/keras_core/layers/convolutional/separable_conv_test.py b/keras/layers/convolutional/separable_conv_test.py similarity index 97% rename from keras_core/layers/convolutional/separable_conv_test.py rename to keras/layers/convolutional/separable_conv_test.py index 7609dbfdf..731516401 100644 --- a/keras_core/layers/convolutional/separable_conv_test.py +++ b/keras/layers/convolutional/separable_conv_test.py @@ -2,14 +2,14 @@ import numpy as np import pytest 
from absl.testing import parameterized -from keras_core import layers -from keras_core import testing -from keras_core.layers.convolutional.conv_test import np_conv1d -from keras_core.layers.convolutional.conv_test import np_conv2d -from keras_core.layers.convolutional.depthwise_conv_test import ( +from keras import layers +from keras import testing +from keras.layers.convolutional.conv_test import np_conv1d +from keras.layers.convolutional.conv_test import np_conv2d +from keras.layers.convolutional.depthwise_conv_test import ( np_depthwise_conv1d, ) -from keras_core.layers.convolutional.depthwise_conv_test import ( +from keras.layers.convolutional.depthwise_conv_test import ( np_depthwise_conv2d, ) diff --git a/keras_core/layers/core/__init__.py b/keras/layers/core/__init__.py similarity index 100% rename from keras_core/layers/core/__init__.py rename to keras/layers/core/__init__.py diff --git a/keras_core/layers/core/dense.py b/keras/layers/core/dense.py similarity index 93% rename from keras_core/layers/core/dense.py rename to keras/layers/core/dense.py index d2b50fb30..5756b5049 100644 --- a/keras_core/layers/core/dense.py +++ b/keras/layers/core/dense.py @@ -1,14 +1,14 @@ -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Dense") +@keras_export("keras.layers.Dense") class Dense(Layer): """Just your regular densely-connected NN layer. 
diff --git a/keras_core/layers/core/dense_test.py b/keras/layers/core/dense_test.py similarity index 95% rename from keras_core/layers/core/dense_test.py rename to keras/layers/core/dense_test.py index 5dfb36938..752bfb2ab 100644 --- a/keras_core/layers/core/dense_test.py +++ b/keras/layers/core/dense_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing -from keras_core.backend.common import keras_tensor +from keras import backend +from keras import layers +from keras import testing +from keras.backend.common import keras_tensor class DenseTest(testing.TestCase): diff --git a/keras_core/layers/core/einsum_dense.py b/keras/layers/core/einsum_dense.py similarity index 94% rename from keras_core/layers/core/einsum_dense.py rename to keras/layers/core/einsum_dense.py index af20a5e3c..2db897cae 100644 --- a/keras_core/layers/core/einsum_dense.py +++ b/keras/layers/core/einsum_dense.py @@ -1,15 +1,15 @@ import re -from keras_core import activations -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import activations +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.EinsumDense") +@keras_export("keras.layers.EinsumDense") class EinsumDense(Layer): """A layer that uses `einsum` as the backing computation. @@ -48,12 +48,12 @@ class EinsumDense(Layer): This example shows how to instantiate a standard Keras dense layer using einsum operations. This example is equivalent to - `keras_core.layers.Dense(64, use_bias=True)`. + `keras.layers.Dense(64, use_bias=True)`. 
- >>> layer = keras_core.layers.EinsumDense("ab,bc->ac", + >>> layer = keras.layers.EinsumDense("ab,bc->ac", ... output_shape=64, ... bias_axes="c") - >>> input_tensor = keras_core.Input(shape=[32]) + >>> input_tensor = keras.Input(shape=[32]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 64) @@ -66,10 +66,10 @@ class EinsumDense(Layer): dimension in the `output_shape` is `None`, because the sequence dimension `b` has an unknown shape. - >>> layer = keras_core.layers.EinsumDense("abc,cd->abd", + >>> layer = keras.layers.EinsumDense("abc,cd->abd", ... output_shape=(None, 64), ... bias_axes="d") - >>> input_tensor = keras_core.Input(shape=[32, 128]) + >>> input_tensor = keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 32, 64) @@ -85,10 +85,10 @@ class EinsumDense(Layer): layer can handle any number of sequence dimensions - including the case where no sequence dimension exists. - >>> layer = keras_core.layers.EinsumDense("...x,xy->...y", + >>> layer = keras.layers.EinsumDense("...x,xy->...y", ... output_shape=64, ... 
bias_axes="y") - >>> input_tensor = keras_core.Input(shape=[32, 128]) + >>> input_tensor = keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor.shape (None, 32, 64) diff --git a/keras_core/layers/core/einsum_dense_test.py b/keras/layers/core/einsum_dense_test.py similarity index 99% rename from keras_core/layers/core/einsum_dense_test.py rename to keras/layers/core/einsum_dense_test.py index 5fcab7afc..6c816c25d 100644 --- a/keras_core/layers/core/einsum_dense_test.py +++ b/keras/layers/core/einsum_dense_test.py @@ -1,8 +1,8 @@ import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class EinsumDenseTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/core/embedding.py b/keras/layers/core/embedding.py similarity index 91% rename from keras_core/layers/core/embedding.py rename to keras/layers/core/embedding.py index b0fcc49f0..32266309b 100644 --- a/keras_core/layers/core/embedding.py +++ b/keras/layers/core/embedding.py @@ -1,12 +1,12 @@ -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Embedding") +@keras_export("keras.layers.Embedding") class Embedding(Layer): """Turns positive integers (indexes) into dense vectors of fixed size. 
@@ -16,8 +16,8 @@ class Embedding(Layer): Example: - >>> model = keras_core.Sequential() - >>> model.add(keras_core.layers.Embedding(1000, 64, input_length=10)) + >>> model = keras.Sequential() + >>> model.add(keras.layers.Embedding(1000, 64, input_length=10)) >>> # The model will take as input an integer matrix of size (batch, >>> # input_length), and the largest integer (i.e. word index) in the input >>> # should be no larger than 999 (vocabulary size). diff --git a/keras_core/layers/core/embedding_test.py b/keras/layers/core/embedding_test.py similarity index 96% rename from keras_core/layers/core/embedding_test.py rename to keras/layers/core/embedding_test.py index 6c3d698d2..529252acc 100644 --- a/keras_core/layers/core/embedding_test.py +++ b/keras/layers/core/embedding_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core.testing import test_case +from keras import backend +from keras import layers +from keras.testing import test_case class EmbeddingTest(test_case.TestCase): diff --git a/keras_core/layers/core/identity.py b/keras/layers/core/identity.py similarity index 71% rename from keras_core/layers/core/identity.py rename to keras/layers/core/identity.py index 02bad96ef..b04ab3a08 100644 --- a/keras_core/layers/core/identity.py +++ b/keras/layers/core/identity.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Identity") +@keras_export("keras.layers.Identity") class Identity(Layer): """Identity layer. 
diff --git a/keras_core/layers/core/identity_test.py b/keras/layers/core/identity_test.py similarity index 91% rename from keras_core/layers/core/identity_test.py rename to keras/layers/core/identity_test.py index 0e5141f60..456a292e1 100644 --- a/keras_core/layers/core/identity_test.py +++ b/keras/layers/core/identity_test.py @@ -1,9 +1,9 @@ import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class IdentityTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/core/input_layer.py b/keras/layers/core/input_layer.py similarity index 94% rename from keras_core/layers/core/input_layer.py rename to keras/layers/core/input_layer.py index af7200b15..161afeebf 100644 --- a/keras_core/layers/core/input_layer.py +++ b/keras/layers/core/input_layer.py @@ -1,12 +1,12 @@ import warnings -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.ops.node import Node +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.ops.node import Node -@keras_core_export("keras_core.layers.InputLayer") +@keras_export("keras.layers.InputLayer") class InputLayer(Layer): def __init__( self, @@ -84,7 +84,7 @@ class InputLayer(Layer): } -@keras_core_export(["keras_core.layers.Input", "keras_core.Input"]) +@keras_export(["keras.layers.Input", "keras.Input"]) def Input( shape=None, batch_size=None, diff --git a/keras_core/layers/core/input_layer_test.py b/keras/layers/core/input_layer_test.py similarity index 96% rename from keras_core/layers/core/input_layer_test.py rename to keras/layers/core/input_layer_test.py index b6640c90f..2e10ac427 100644 --- a/keras_core/layers/core/input_layer_test.py +++ b/keras/layers/core/input_layer_test.py @@ -1,10 
+1,10 @@ import numpy as np from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.backend import KerasTensor -from keras_core.layers import InputLayer +from keras import backend +from keras import testing +from keras.backend import KerasTensor +from keras.layers import InputLayer class InputLayerTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/core/lambda_layer.py b/keras/layers/core/lambda_layer.py similarity index 95% rename from keras_core/layers/core/lambda_layer.py rename to keras/layers/core/lambda_layer.py index a8f355f15..145759764 100644 --- a/keras_core/layers/core/lambda_layer.py +++ b/keras/layers/core/lambda_layer.py @@ -3,15 +3,15 @@ import types import tree -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.saving import serialization_lib -from keras_core.utils import python_utils -from keras_core.utils import shape_utils +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.saving import serialization_lib +from keras.utils import python_utils +from keras.utils import shape_utils -@keras_core_export("keras_core.layers.Lambda") +@keras_export("keras.layers.Lambda") class Lambda(Layer): """Wraps arbitrary expressions as a `Layer` object. @@ -177,7 +177,7 @@ class Lambda(Layer): "config artifact, you can override this error " "by passing `safe_mode=False` " "to `from_config()`, or calling " - "`keras_core.config.enable_unsafe_deserialization()." + "`keras.config.enable_unsafe_deserialization()." 
) @classmethod diff --git a/keras_core/layers/core/lambda_layer_test.py b/keras/layers/core/lambda_layer_test.py similarity index 97% rename from keras_core/layers/core/lambda_layer_test.py rename to keras/layers/core/lambda_layer_test.py index 34ffc46d9..d65cf8bba 100644 --- a/keras_core/layers/core/lambda_layer_test.py +++ b/keras/layers/core/lambda_layer_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class LambdaTest(testing.TestCase): diff --git a/keras_core/layers/core/masking.py b/keras/layers/core/masking.py similarity index 87% rename from keras_core/layers/core/masking.py rename to keras/layers/core/masking.py index 23e867801..f7a05eb48 100644 --- a/keras_core/layers/core/masking.py +++ b/keras/layers/core/masking.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Masking") +@keras_export("keras.layers.Masking") class Masking(Layer): """Masks a sequence by using a mask value to skip timesteps. @@ -31,9 +31,9 @@ class Masking(Layer): inputs[:, 3, :] = 0. inputs[:, 5, :] = 0. - model = keras_core.models.Sequential() - model.add(keras_core.layers.Masking(mask_value=0.) - model.add(keras_core.layers.LSTM(32)) + model = keras.models.Sequential() + model.add(keras.layers.Masking(mask_value=0.)) + model.add(keras.layers.LSTM(32)) output = model(inputs) # The time step 3 and 5 will be skipped from LSTM calculation.
``` diff --git a/keras_core/layers/core/masking_test.py b/keras/layers/core/masking_test.py similarity index 94% rename from keras_core/layers/core/masking_test.py rename to keras/layers/core/masking_test.py index 7cfff6aea..115783dbc 100644 --- a/keras_core/layers/core/masking_test.py +++ b/keras/layers/core/masking_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import models -from keras_core import testing +from keras import layers +from keras import models +from keras import testing class MaskingTest(testing.TestCase): diff --git a/keras_core/layers/core/wrapper.py b/keras/layers/core/wrapper.py similarity index 87% rename from keras_core/layers/core/wrapper.py rename to keras/layers/core/wrapper.py index d5310eb7a..ee7f13477 100644 --- a/keras_core/layers/core/wrapper.py +++ b/keras/layers/core/wrapper.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.saving import serialization_lib +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.saving import serialization_lib -@keras_core_export("keras_core.layers.Wrapper") +@keras_export("keras.layers.Wrapper") class Wrapper(Layer): """Abstract wrapper base class. 
diff --git a/keras_core/layers/core/wrapper_test.py b/keras/layers/core/wrapper_test.py similarity index 96% rename from keras_core/layers/core/wrapper_test.py rename to keras/layers/core/wrapper_test.py index a25f9ae8c..eb5fa9aed 100644 --- a/keras_core/layers/core/wrapper_test.py +++ b/keras/layers/core/wrapper_test.py @@ -1,8 +1,8 @@ import pytest -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class ExampleWrapper(layers.Wrapper): diff --git a/keras_core/layers/input_spec.py b/keras/layers/input_spec.py similarity index 98% rename from keras_core/layers/input_spec.py rename to keras/layers/input_spec.py index 52f17e203..20cea734b 100644 --- a/keras_core/layers/input_spec.py +++ b/keras/layers/input_spec.py @@ -1,10 +1,10 @@ import tree -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export -@keras_core_export(["keras_core.InputSpec", "keras_core.layers.InputSpec"]) +@keras_export(["keras.InputSpec", "keras.layers.InputSpec"]) class InputSpec: """Specifies the rank, dtype and shape of every input to a layer. 
diff --git a/keras_core/layers/layer.py b/keras/layers/layer.py similarity index 97% rename from keras_core/layers/layer.py rename to keras/layers/layer.py index ac4c4b631..86024e6fd 100644 --- a/keras_core/layers/layer.py +++ b/keras/layers/layer.py @@ -22,38 +22,38 @@ from functools import wraps import tree -from keras_core import backend -from keras_core import initializers -from keras_core import mixed_precision -from keras_core import regularizers -from keras_core import utils -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend.common import global_state -from keras_core.layers import input_spec -from keras_core.metrics.metric import Metric -from keras_core.ops.operation import Operation -from keras_core.utils import python_utils -from keras_core.utils import summary_utils -from keras_core.utils import traceback_utils -from keras_core.utils import tracking -from keras_core.utils.shape_utils import map_shape_structure +from keras import backend +from keras import initializers +from keras import mixed_precision +from keras import regularizers +from keras import utils +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend.common import global_state +from keras.layers import input_spec +from keras.metrics.metric import Metric +from keras.ops.operation import Operation +from keras.utils import python_utils +from keras.utils import summary_utils +from keras.utils import traceback_utils +from keras.utils import tracking +from keras.utils.shape_utils import map_shape_structure if backend.backend() == "tensorflow": - from keras_core.backend.tensorflow.layer import TFLayer as BackendLayer + from keras.backend.tensorflow.layer import TFLayer as BackendLayer elif backend.backend() == "jax": - from keras_core.backend.jax.layer import JaxLayer as BackendLayer + from keras.backend.jax.layer import JaxLayer as BackendLayer elif backend.backend() == "torch": - 
from keras_core.backend.torch.layer import TorchLayer as BackendLayer + from keras.backend.torch.layer import TorchLayer as BackendLayer elif backend.backend() == "numpy": - from keras_core.backend.numpy.layer import NumpyLayer as BackendLayer + from keras.backend.numpy.layer import NumpyLayer as BackendLayer else: raise RuntimeError( f"Backend '{backend.backend()}' must implement a layer mixin class." ) -@keras_core_export(["keras_core.Layer", "keras_core.layers.Layer"]) +@keras_export(["keras.Layer", "keras.layers.Layer"]) class Layer(BackendLayer, Operation): """This is the class from which all layers inherit. @@ -78,12 +78,12 @@ class Layer(BackendLayer, Operation): trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights. Can also be a - `keras_core.mixed_precision.DTypePolicy`, + `keras.mixed_precision.DTypePolicy`, which allows the computation and weight dtype to differ. Defaults to `None`. `None` means to use - `keras_core.mixed_precision.dtype_policy()`, + `keras.mixed_precision.dtype_policy()`, which is a `float32` policy unless set to different value - (via `keras_core.mixed_precision.set_dtype_policy()`). + (via `keras.mixed_precision.set_dtype_policy()`). Attributes: name: The name of the layer (string). @@ -93,7 +93,7 @@ class Layer(BackendLayer, Operation): Layers automatically cast inputs to this dtype, which causes the computations and output to also be in this dtype. When mixed precision is used with a - `keras_core.mixed_precision.DTypePolicy`, this will be different + `keras.mixed_precision.DTypePolicy`, this will be different than `variable_dtype`. trainable_weights: List of variables to be included in backprop. 
non_trainable_weights: List of variables that should not be diff --git a/keras_core/layers/layer_test.py b/keras/layers/layer_test.py similarity index 99% rename from keras_core/layers/layer_test.py rename to keras/layers/layer_test.py index 72429b1ef..527758838 100644 --- a/keras_core/layers/layer_test.py +++ b/keras/layers/layer_test.py @@ -1,12 +1,12 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import metrics -from keras_core import models -from keras_core import ops -from keras_core import testing +from keras import backend +from keras import layers +from keras import metrics +from keras import models +from keras import ops +from keras import testing class LayerTest(testing.TestCase): diff --git a/keras_core/layers/merging/__init__.py b/keras/layers/merging/__init__.py similarity index 100% rename from keras_core/layers/merging/__init__.py rename to keras/layers/merging/__init__.py diff --git a/keras/layers/merging/add.py b/keras/layers/merging/add.py new file mode 100644 index 000000000..968f7f374 --- /dev/null +++ b/keras/layers/merging/add.py @@ -0,0 +1,69 @@ +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Add") +class Add(Merge): + """Performs elementwise addition operation. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Add()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `added = keras.layers.add([x1, x2])` + >>> added = keras.layers.Add()([x1, x2]) + >>> out = keras.layers.Dense(4)(added) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output + + +@keras_export("keras.layers.add") +def add(inputs, **kwargs): + """Functional interface to the `keras.layers.Add` layer. + + Args: + inputs: A list of input tensors with the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the sum of the inputs. It has the same shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.add([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> added = keras.layers.add([x1, x2]) + >>> out = keras.layers.Dense(4)(added) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Add(**kwargs)(inputs) diff --git a/keras/layers/merging/average.py b/keras/layers/merging/average.py new file mode 100644 index 000000000..5763c99a6 --- /dev/null +++ b/keras/layers/merging/average.py @@ -0,0 +1,70 @@ +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Average") +class Average(Merge): + """Averages a list of inputs element-wise. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape).
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Average()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.average([x1, x2])` + >>> y = keras.layers.Average()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output / len(inputs) + + +@keras_export("keras.layers.average") +def average(inputs, **kwargs): + """Functional interface to the `keras.layers.Average` layer. + + Args: + inputs: A list of input tensors, all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the element-wise average of the inputs with the same + shape as the inputs.
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.average([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.average([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Average(**kwargs)(inputs) diff --git a/keras_core/layers/merging/base_merge.py b/keras/layers/merging/base_merge.py similarity index 98% rename from keras_core/layers/merging/base_merge.py rename to keras/layers/merging/base_merge.py index 8ef024328..ae00f8121 100644 --- a/keras_core/layers/merging/base_merge.py +++ b/keras/layers/merging/base_merge.py @@ -1,7 +1,7 @@ -from keras_core import backend -from keras_core import ops -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.layers.layer import Layer +from keras import backend +from keras import ops +from keras.backend.common.keras_tensor import KerasTensor +from keras.layers.layer import Layer class Merge(Layer): diff --git a/keras_core/layers/merging/concatenate.py b/keras/layers/merging/concatenate.py similarity index 92% rename from keras_core/layers/merging/concatenate.py rename to keras/layers/merging/concatenate.py index bdbd5130c..37f9536d8 100644 --- a/keras_core/layers/merging/concatenate.py +++ b/keras/layers/merging/concatenate.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge -@keras_core_export("keras_core.layers.Concatenate") +@keras_export("keras.layers.Concatenate") class 
Concatenate(Merge): """Concatenates a list of inputs. @@ -15,13 +15,13 @@ class Concatenate(Merge): >>> x = np.arange(20).reshape(2, 2, 5) >>> y = np.arange(20, 30).reshape(2, 1, 5) - >>> keras_core.layers.Concatenate(axis=1)([x, y]) + >>> keras.layers.Concatenate(axis=1)([x, y]) Usage in a Keras model: - >>> x1 = keras_core.layers.Dense(8)(np.arange(10).reshape(5, 2)) - >>> x2 = keras_core.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) - >>> y = keras_core.layers.Concatenate()([x1, x2]) + >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) + >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) + >>> y = keras.layers.Concatenate()([x1, x2]) Args: axis: Axis along which to concatenate. @@ -157,7 +157,7 @@ class Concatenate(Merge): return dict(list(base_config.items()) + list(config.items())) -@keras_core_export("keras_core.layers.concatenate") +@keras_export("keras.layers.concatenate") def concatenate(inputs, axis=-1, **kwargs): """Functional interface to the `Concatenate` layer. diff --git a/keras_core/layers/merging/dot.py b/keras/layers/merging/dot.py similarity index 95% rename from keras_core/layers/merging/dot.py rename to keras/layers/merging/dot.py index bfa728f69..7e977faa8 100644 --- a/keras_core/layers/merging/dot.py +++ b/keras/layers/merging/dot.py @@ -1,7 +1,7 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge -from keras_core.utils.numerical_utils import normalize +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge +from keras.utils.numerical_utils import normalize def batch_dot(x, y, axes=None): @@ -105,7 +105,7 @@ def batch_dot(x, y, axes=None): "Cannot perform batch_dot over axis 0. 
" "If your inputs are not batched, " "add a dummy batch dimension to your " - "inputs using keras_core.ops.expand_dims(x, 0)" + "inputs using keras.ops.expand_dims(x, 0)" ) a0, a1 = axes d1 = x_shape[a0] @@ -196,7 +196,7 @@ def batch_dot(x, y, axes=None): return result -@keras_core_export("keras_core.layers.Dot") +@keras_export("keras.layers.Dot") class Dot(Merge): """Computes element-wise dot product of two tensors. @@ -215,13 +215,13 @@ class Dot(Merge): >>> x = np.arange(10).reshape(1, 5, 2) >>> y = np.arange(10, 20).reshape(1, 2, 5) - >>> keras_core.layers.Dot(axes=(1, 2))([x, y]) + >>> keras.layers.Dot(axes=(1, 2))([x, y]) Usage in a Keras model: - >>> x1 = keras_core.layers.Dense(8)(np.arange(10).reshape(5, 2)) - >>> x2 = keras_core.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) - >>> y = keras_core.layers.Dot(axes=1)([x1, x2]) + >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) + >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) + >>> y = keras.layers.Dot(axes=1)([x1, x2]) Args: axes: Integer or tuple of integers, axis or axes along which to @@ -355,7 +355,7 @@ class Dot(Merge): return dict(list(base_config.items()) + list(config.items())) -@keras_core_export("keras_core.layers.dot") +@keras_export("keras.layers.dot") def dot(inputs, axes=-1, **kwargs): """Functional interface to the `Dot` layer. diff --git a/keras/layers/merging/maximum.py b/keras/layers/merging/maximum.py new file mode 100644 index 000000000..fa71c1314 --- /dev/null +++ b/keras/layers/merging/maximum.py @@ -0,0 +1,70 @@ +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Maximum") +class Maximum(Merge): + """Computes element-wise maximum on a list of inputs. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Maximum()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.maximum([x1, x2])` + >>> y = keras.layers.Maximum()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.maximum(output, inputs[i]) + return output + + +@keras_export("keras.layers.maximum") +def maximum(inputs, **kwargs): + """Functional interface to the `keras.layers.Maximum` layer. + + Args: + inputs: A list of input tensors, all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the element-wise maximum of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.maximum([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.maximum([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Maximum(**kwargs)(inputs) diff --git a/keras_core/layers/merging/merging_test.py b/keras/layers/merging/merging_test.py similarity index 98% rename from keras_core/layers/merging/merging_test.py rename to keras/layers/merging/merging_test.py index d3df625cb..28aed2fac 100644 --- a/keras_core/layers/merging/merging_test.py +++ b/keras/layers/merging/merging_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing +from keras import backend +from keras import layers +from keras import models +from keras import testing def np_dot(a, b, axes): diff --git a/keras/layers/merging/minimum.py b/keras/layers/merging/minimum.py new file mode 100644 index 000000000..bca89abd6 --- /dev/null +++ b/keras/layers/merging/minimum.py @@ -0,0 +1,70 @@ +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Minimum") +class Minimum(Merge): + """Computes elementwise minimum on a list of inputs. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Minimum()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.minimum([x1, x2])` + >>> y = keras.layers.Minimum()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.minimum(output, inputs[i]) + return output + + +@keras_export("keras.layers.minimum") +def minimum(inputs, **kwargs): + """Functional interface to the `keras.layers.Minimum` layer. + + Args: + inputs: A list of input tensors, all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the elementwise minimum of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.minimum([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.minimum([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Minimum(**kwargs)(inputs) diff --git a/keras/layers/merging/multiply.py b/keras/layers/merging/multiply.py new file mode 100644 index 000000000..929b51191 --- /dev/null +++ b/keras/layers/merging/multiply.py @@ -0,0 +1,70 @@ +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Multiply") +class Multiply(Merge): + """Performs elementwise multiplication. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Multiply()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.multiply([x1, x2])` + >>> y = keras.layers.Multiply()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.multiply(output, inputs[i]) + return output + + +@keras_export("keras.layers.multiply") +def multiply(inputs, **kwargs): + """Functional interface to the `keras.layers.Multiply` layer. + + Args: + inputs: A list of input tensors, all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the elementwise product of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.multiply([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.multiply([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Multiply(**kwargs)(inputs) diff --git a/keras_core/layers/merging/subtract.py b/keras/layers/merging/subtract.py similarity index 52% rename from keras_core/layers/merging/subtract.py rename to keras/layers/merging/subtract.py index 1b1bf6575..4dc783917 100644 --- a/keras_core/layers/merging/subtract.py +++ b/keras/layers/merging/subtract.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge +from keras import ops +from keras.api_export import keras_export +from keras.layers.merging.base_merge import Merge -@keras_core_export("keras_core.layers.Subtract") +@keras_export("keras.layers.Subtract") class Subtract(Merge): """Performs elementwise subtraction. 
@@ -16,18 +16,18 @@ class Subtract(Merge): >>> input_shape = (2, 3, 4) >>> x1 = np.random.rand(*input_shape) >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Subtract()([x1, x2]) + >>> y = keras.layers.Subtract()([x1, x2]) Usage in a Keras model: - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `subtracted = keras_core.layers.subtract([x1, x2])` - >>> subtracted = keras_core.layers.Subtract()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(subtracted) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])` + >>> subtracted = keras.layers.Subtract()([x1, x2]) + >>> out = keras.layers.Dense(4)(subtracted) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) """ @@ -48,9 +48,9 @@ class Subtract(Merge): return ops.subtract(inputs[0], inputs[1]) -@keras_core_export("keras_core.layers.subtract") +@keras_export("keras.layers.subtract") def subtract(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Subtract` layer. + """Functional interface to the `keras.layers.Subtract` layer. 
Args: inputs: A list of input tensors of size 2, each tensor of @@ -66,17 +66,17 @@ def subtract(inputs, **kwargs): >>> input_shape = (2, 3, 4) >>> x1 = np.random.rand(*input_shape) >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.subtract([x1, x2]) + >>> y = keras.layers.subtract([x1, x2]) Usage in a Keras model: - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> subtracted = keras_core.layers.subtract([x1, x2]) - >>> out = keras_core.layers.Dense(4)(subtracted) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> subtracted = keras.layers.subtract([x1, x2]) + >>> out = keras.layers.Dense(4)(subtracted) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) """ return Subtract(**kwargs)(inputs) diff --git a/keras_core/layers/normalization/__init__.py b/keras/layers/normalization/__init__.py similarity index 100% rename from keras_core/layers/normalization/__init__.py rename to keras/layers/normalization/__init__.py diff --git a/keras_core/layers/normalization/batch_normalization.py b/keras/layers/normalization/batch_normalization.py similarity index 96% rename from keras_core/layers/normalization/batch_normalization.py rename to keras/layers/normalization/batch_normalization.py index 5bb3a4eb7..a33a70665 100644 --- a/keras_core/layers/normalization/batch_normalization.py +++ b/keras/layers/normalization/batch_normalization.py @@ -1,14 +1,14 @@ -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export 
import keras_core_export -from keras_core.backend import standardize_dtype -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.backend import standardize_dtype +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.BatchNormalization") +@keras_export("keras.layers.BatchNormalization") class BatchNormalization(Layer): """Layer that normalizes its inputs. diff --git a/keras_core/layers/normalization/batch_normalization_test.py b/keras/layers/normalization/batch_normalization_test.py similarity index 98% rename from keras_core/layers/normalization/batch_normalization_test.py rename to keras/layers/normalization/batch_normalization_test.py index 2eebac3af..980ac916b 100644 --- a/keras_core/layers/normalization/batch_normalization_test.py +++ b/keras/layers/normalization/batch_normalization_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class BatchNormalizationTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/normalization/group_normalization.py b/keras/layers/normalization/group_normalization.py similarity index 96% rename from keras_core/layers/normalization/group_normalization.py rename to keras/layers/normalization/group_normalization.py index 3bd5d21c6..755eb5cd7 100644 --- a/keras_core/layers/normalization/group_normalization.py +++ b/keras/layers/normalization/group_normalization.py @@ -1,13 +1,13 @@ -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers 
-from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.GroupNormalization") +@keras_export("keras.layers.GroupNormalization") class GroupNormalization(Layer): """Group normalization layer. diff --git a/keras_core/layers/normalization/group_normalization_test.py b/keras/layers/normalization/group_normalization_test.py similarity index 97% rename from keras_core/layers/normalization/group_normalization_test.py rename to keras/layers/normalization/group_normalization_test.py index 1780f62a7..c836b8930 100644 --- a/keras_core/layers/normalization/group_normalization_test.py +++ b/keras/layers/normalization/group_normalization_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import constraints -from keras_core import layers -from keras_core import regularizers -from keras_core import testing +from keras import constraints +from keras import layers +from keras import regularizers +from keras import testing class GroupNormalizationTest(testing.TestCase): diff --git a/keras_core/layers/normalization/layer_normalization.py b/keras/layers/normalization/layer_normalization.py similarity index 96% rename from keras_core/layers/normalization/layer_normalization.py rename to keras/layers/normalization/layer_normalization.py index f4cb231d4..b449445a6 100644 --- a/keras_core/layers/normalization/layer_normalization.py +++ b/keras/layers/normalization/layer_normalization.py @@ -1,12 +1,12 @@ -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export 
-from keras_core.layers.layer import Layer +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.LayerNormalization") +@keras_export("keras.layers.LayerNormalization") class LayerNormalization(Layer): """Layer normalization layer (Ba et al., 2016). @@ -53,7 +53,7 @@ class LayerNormalization(Layer): For example: - >>> layer = keras_core.layers.LayerNormalization(axis=[1, 2, 3]) + >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3]) >>> layer.build([5, 20, 30, 40]) >>> print(layer.beta.shape) (20, 30, 40) diff --git a/keras_core/layers/normalization/layer_normalization_test.py b/keras/layers/normalization/layer_normalization_test.py similarity index 97% rename from keras_core/layers/normalization/layer_normalization_test.py rename to keras/layers/normalization/layer_normalization_test.py index 94b039db0..01e93cf85 100644 --- a/keras_core/layers/normalization/layer_normalization_test.py +++ b/keras/layers/normalization/layer_normalization_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import ops -from keras_core import regularizers -from keras_core import testing +from keras import layers +from keras import ops +from keras import regularizers +from keras import testing class LayerNormalizationTest(testing.TestCase): diff --git a/keras_core/layers/normalization/spectral_normalization.py b/keras/layers/normalization/spectral_normalization.py similarity index 86% rename from keras_core/layers/normalization/spectral_normalization.py rename to keras/layers/normalization/spectral_normalization.py index e8c13c4f4..aae46e70e 100644 --- a/keras_core/layers/normalization/spectral_normalization.py +++ b/keras/layers/normalization/spectral_normalization.py @@ -1,12 +1,12 @@ -from keras_core import initializers -from keras_core import 
ops -from keras_core.api_export import keras_core_export -from keras_core.layers import Wrapper -from keras_core.layers.input_spec import InputSpec -from keras_core.utils.numerical_utils import normalize +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.layers import Wrapper +from keras.layers.input_spec import InputSpec +from keras.utils.numerical_utils import normalize -@keras_core_export("keras_core.layers.SpectralNormalization") +@keras_export("keras.layers.SpectralNormalization") class SpectralNormalization(Wrapper): """Performs spectral normalization on the weights of a target layer. @@ -14,7 +14,7 @@ class SpectralNormalization(Wrapper): constraining their spectral norm, which can stabilize the training of GANs. Args: - layer: A `keras_core.layers.Layer` instance that + layer: A `keras.layers.Layer` instance that has either a `kernel` (e.g. `Conv2D`, `Dense`...) or an `embeddings` attribute (`Embedding` layer). power_iterations: int, the number of iterations during normalization. 
@@ -22,16 +22,16 @@ class SpectralNormalization(Wrapper): Examples: - Wrap `keras_core.layers.Conv2D`: + Wrap `keras.layers.Conv2D`: >>> x = np.random.rand(1, 10, 10, 1) - >>> conv2d = SpectralNormalization(keras_core.layers.Conv2D(2, 2)) + >>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2)) >>> y = conv2d(x) >>> y.shape (1, 9, 9, 2) - Wrap `keras_core.layers.Dense`: + Wrap `keras.layers.Dense`: >>> x = np.random.rand(1, 10, 10, 1) - >>> dense = SpectralNormalization(keras_core.layers.Dense(10)) + >>> dense = SpectralNormalization(keras.layers.Dense(10)) >>> y = dense(x) >>> y.shape (1, 10, 10, 10) diff --git a/keras_core/layers/normalization/spectral_normalization_test.py b/keras/layers/normalization/spectral_normalization_test.py similarity index 95% rename from keras_core/layers/normalization/spectral_normalization_test.py rename to keras/layers/normalization/spectral_normalization_test.py index 488beae24..827b88755 100644 --- a/keras_core/layers/normalization/spectral_normalization_test.py +++ b/keras/layers/normalization/spectral_normalization_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class SpectralNormalizationTest(testing.TestCase): diff --git a/keras_core/layers/normalization/unit_normalization.py b/keras/layers/normalization/unit_normalization.py similarity index 86% rename from keras_core/layers/normalization/unit_normalization.py rename to keras/layers/normalization/unit_normalization.py index 33553ada0..09b5a34e2 100644 --- a/keras_core/layers/normalization/unit_normalization.py +++ b/keras/layers/normalization/unit_normalization.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import ops +from keras.api_export import keras_export +from 
keras.layers.layer import Layer -@keras_core_export("keras_core.layers.UnitNormalization") +@keras_export("keras.layers.UnitNormalization") class UnitNormalization(Layer): """Unit normalization layer. @@ -13,7 +13,7 @@ class UnitNormalization(Layer): Example: >>> data = np.arange(6).reshape(2, 3) - >>> normalized_data = keras_core.layers.UnitNormalization()(data) + >>> normalized_data = keras.layers.UnitNormalization()(data) >>> print(np.sum(normalized_data[0, :] ** 2) 1.0 diff --git a/keras_core/layers/normalization/unit_normalization_test.py b/keras/layers/normalization/unit_normalization_test.py similarity index 95% rename from keras_core/layers/normalization/unit_normalization_test.py rename to keras/layers/normalization/unit_normalization_test.py index 94235e855..591e03311 100644 --- a/keras_core/layers/normalization/unit_normalization_test.py +++ b/keras/layers/normalization/unit_normalization_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing def squared_l2_norm(x): diff --git a/keras_core/layers/pooling/__init__.py b/keras/layers/pooling/__init__.py similarity index 100% rename from keras_core/layers/pooling/__init__.py rename to keras/layers/pooling/__init__.py diff --git a/keras_core/layers/pooling/average_pooling1d.py b/keras/layers/pooling/average_pooling1d.py similarity index 87% rename from keras_core/layers/pooling/average_pooling1d.py rename to keras/layers/pooling/average_pooling1d.py index f641f8359..193e6d3cc 100644 --- a/keras_core/layers/pooling/average_pooling1d.py +++ b/keras/layers/pooling/average_pooling1d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - 
["keras_core.layers.AveragePooling1D", "keras_core.layers.AvgPool1D"] +@keras_export( + ["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"] ) class AveragePooling1D(BasePooling): """Average pooling for temporal data. @@ -50,7 +50,7 @@ class AveragePooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> avg_pool_1d = keras_core.layers.AveragePooling1D(pool_size=2, + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, ... strides=1, padding="valid") >>> avg_pool_1d(x) @@ -58,7 +58,7 @@ class AveragePooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> avg_pool_1d = keras_core.layers.AveragePooling1D(pool_size=2, + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, ... strides=2, padding="valid") >>> avg_pool_1d(x) @@ -66,7 +66,7 @@ class AveragePooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> avg_pool_1d = keras_core.layers.AveragePooling1D(pool_size=2, + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, ... 
strides=1, padding="same") >>> avg_pool_1d(x) """ diff --git a/keras_core/layers/pooling/average_pooling2d.py b/keras/layers/pooling/average_pooling2d.py similarity index 89% rename from keras_core/layers/pooling/average_pooling2d.py rename to keras/layers/pooling/average_pooling2d.py index 2c7083523..9a6c28cc4 100644 --- a/keras_core/layers/pooling/average_pooling2d.py +++ b/keras/layers/pooling/average_pooling2d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - ["keras_core.layers.AveragePooling2D", "keras_core.layers.AvgPool2D"] +@keras_export( + ["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"] ) class AveragePooling2D(BasePooling): """Average pooling operation for 2D spatial data. @@ -63,7 +63,7 @@ class AveragePooling2D(BasePooling): ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = np.reshape(x, [1, 3, 3, 1]) - >>> avg_pool_2d = keras_core.layers.AveragePooling2D(pool_size=(2, 2), + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), ... strides=(1, 1), padding="valid") >>> avg_pool_2d(x) @@ -73,7 +73,7 @@ class AveragePooling2D(BasePooling): ... [5., 6., 7., 8.], ... [9., 10., 11., 12.]]) >>> x = np.reshape(x, [1, 3, 4, 1]) - >>> avg_pool_2d = keras_core.layers.AveragePooling2D(pool_size=(2, 2), + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), ... strides=(2, 2), padding="valid") >>> avg_pool_2d(x) @@ -83,7 +83,7 @@ class AveragePooling2D(BasePooling): ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = np.reshape(x, [1, 3, 3, 1]) - >>> avg_pool_2d = keras_core.layers.AveragePooling2D(pool_size=(2, 2), + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), ... 
strides=(1, 1), padding="same") >>> avg_pool_2d(x) """ diff --git a/keras_core/layers/pooling/average_pooling3d.py b/keras/layers/pooling/average_pooling3d.py similarity index 89% rename from keras_core/layers/pooling/average_pooling3d.py rename to keras/layers/pooling/average_pooling3d.py index ebea0cd97..5911aa834 100644 --- a/keras_core/layers/pooling/average_pooling3d.py +++ b/keras/layers/pooling/average_pooling3d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - ["keras_core.layers.AveragePooling3D", "keras_core.layers.AvgPool3D"] +@keras_export( + ["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"] ) class AveragePooling3D(BasePooling): """Average pooling operation for 3D data (spatial or spatio-temporal). @@ -58,8 +58,8 @@ class AveragePooling3D(BasePooling): width = 30 channels = 3 - inputs = keras_core.layers.Input(shape=(depth, height, width, channels)) - layer = keras_core.layers.AveragePooling3D(pool_size=3) + inputs = keras.layers.Input(shape=(depth, height, width, channels)) + layer = keras.layers.AveragePooling3D(pool_size=3) outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3) ``` """ diff --git a/keras_core/layers/pooling/average_pooling_test.py b/keras/layers/pooling/average_pooling_test.py similarity index 99% rename from keras_core/layers/pooling/average_pooling_test.py rename to keras/layers/pooling/average_pooling_test.py index a31c86bc3..436e2769f 100644 --- a/keras_core/layers/pooling/average_pooling_test.py +++ b/keras/layers/pooling/average_pooling_test.py @@ -3,9 +3,9 @@ import pytest from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers 
+from keras import testing @pytest.mark.requires_trainable_backend diff --git a/keras_core/layers/pooling/base_global_pooling.py b/keras/layers/pooling/base_global_pooling.py similarity index 90% rename from keras_core/layers/pooling/base_global_pooling.py rename to keras/layers/pooling/base_global_pooling.py index cdd199704..1eda65a6c 100644 --- a/keras_core/layers/pooling/base_global_pooling.py +++ b/keras/layers/pooling/base_global_pooling.py @@ -1,6 +1,6 @@ -from keras_core.backend import image_data_format -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras.backend import image_data_format +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer class BaseGlobalPooling(Layer): diff --git a/keras_core/layers/pooling/base_pooling.py b/keras/layers/pooling/base_pooling.py similarity index 88% rename from keras_core/layers/pooling/base_pooling.py rename to keras/layers/pooling/base_pooling.py index 9cffdf06a..85751086c 100644 --- a/keras_core/layers/pooling/base_pooling.py +++ b/keras/layers/pooling/base_pooling.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core import ops -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.ops.operation_utils import compute_pooling_output_shape -from keras_core.utils import argument_validation +from keras import backend +from keras import ops +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.ops.operation_utils import compute_pooling_output_shape +from keras.utils import argument_validation class BasePooling(Layer): diff --git a/keras_core/layers/pooling/global_average_pooling1d.py b/keras/layers/pooling/global_average_pooling1d.py similarity index 88% rename from keras_core/layers/pooling/global_average_pooling1d.py rename to keras/layers/pooling/global_average_pooling1d.py index 6878650d9..f5b4d0a08 100644 --- 
a/keras_core/layers/pooling/global_average_pooling1d.py +++ b/keras/layers/pooling/global_average_pooling1d.py @@ -1,13 +1,13 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling -@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalAveragePooling1D", - "keras_core.layers.GlobalAvgPool1D", + "keras.layers.GlobalAveragePooling1D", + "keras.layers.GlobalAvgPool1D", ] ) class GlobalAveragePooling1D(BaseGlobalPooling): @@ -54,7 +54,7 @@ class GlobalAveragePooling1D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 3, 4) - >>> y = keras_core.layers.GlobalAveragePooling1D()(x) + >>> y = keras.layers.GlobalAveragePooling1D()(x) >>> y.shape (2, 4) """ diff --git a/keras_core/layers/pooling/global_average_pooling2d.py b/keras/layers/pooling/global_average_pooling2d.py similarity index 86% rename from keras_core/layers/pooling/global_average_pooling2d.py rename to keras/layers/pooling/global_average_pooling2d.py index 8e5dba31e..d6147ea21 100644 --- a/keras_core/layers/pooling/global_average_pooling2d.py +++ b/keras/layers/pooling/global_average_pooling2d.py @@ -1,12 +1,12 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling -@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalAveragePooling2D", - "keras_core.layers.GlobalAvgPool2D", + "keras.layers.GlobalAveragePooling2D", + "keras.layers.GlobalAvgPool2D", ] ) class GlobalAveragePooling2D(BaseGlobalPooling): @@ -49,7 +49,7 @@ class 
GlobalAveragePooling2D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 4, 5, 3) - >>> y = keras_core.layers.GlobalAveragePooling2D()(x) + >>> y = keras.layers.GlobalAveragePooling2D()(x) >>> y.shape (2, 3) """ diff --git a/keras_core/layers/pooling/global_average_pooling3d.py b/keras/layers/pooling/global_average_pooling3d.py similarity index 87% rename from keras_core/layers/pooling/global_average_pooling3d.py rename to keras/layers/pooling/global_average_pooling3d.py index d930dc3a7..8fa05eea5 100644 --- a/keras_core/layers/pooling/global_average_pooling3d.py +++ b/keras/layers/pooling/global_average_pooling3d.py @@ -1,12 +1,12 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling -@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalAveragePooling3D", - "keras_core.layers.GlobalAvgPool3D", + "keras.layers.GlobalAveragePooling3D", + "keras.layers.GlobalAvgPool3D", ] ) class GlobalAveragePooling3D(BaseGlobalPooling): @@ -50,7 +50,7 @@ class GlobalAveragePooling3D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 4, 5, 4, 3) - >>> y = keras_core.layers.GlobalAveragePooling3D()(x) + >>> y = keras.layers.GlobalAveragePooling3D()(x) >>> y.shape (2, 3) """ diff --git a/keras_core/layers/pooling/global_average_pooling_test.py b/keras/layers/pooling/global_average_pooling_test.py similarity index 98% rename from keras_core/layers/pooling/global_average_pooling_test.py rename to keras/layers/pooling/global_average_pooling_test.py index 60a832742..4069abc75 100644 --- a/keras_core/layers/pooling/global_average_pooling_test.py +++ b/keras/layers/pooling/global_average_pooling_test.py @@ -2,8 +2,8 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from 
keras_core import testing +from keras import layers +from keras import testing @pytest.mark.requires_trainable_backend diff --git a/keras_core/layers/pooling/global_max_pooling1d.py b/keras/layers/pooling/global_max_pooling1d.py similarity index 86% rename from keras_core/layers/pooling/global_max_pooling1d.py rename to keras/layers/pooling/global_max_pooling1d.py index 7b02fa51d..2956474cb 100644 --- a/keras_core/layers/pooling/global_max_pooling1d.py +++ b/keras/layers/pooling/global_max_pooling1d.py @@ -1,12 +1,12 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling -@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalMaxPooling1D", - "keras_core.layers.GlobalMaxPool1D", + "keras.layers.GlobalMaxPooling1D", + "keras.layers.GlobalMaxPool1D", ] ) class GlobalMaxPooling1D(BaseGlobalPooling): @@ -48,7 +48,7 @@ class GlobalMaxPooling1D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 3, 4) - >>> y = keras_core.layers.GlobalMaxPooling1D()(x) + >>> y = keras.layers.GlobalMaxPooling1D()(x) >>> y.shape (2, 4) """ diff --git a/keras_core/layers/pooling/global_max_pooling2d.py b/keras/layers/pooling/global_max_pooling2d.py similarity index 87% rename from keras_core/layers/pooling/global_max_pooling2d.py rename to keras/layers/pooling/global_max_pooling2d.py index e39a2e605..66a117cd5 100644 --- a/keras_core/layers/pooling/global_max_pooling2d.py +++ b/keras/layers/pooling/global_max_pooling2d.py @@ -1,12 +1,12 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling 
-@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalMaxPooling2D", - "keras_core.layers.GlobalMaxPool2D", + "keras.layers.GlobalMaxPooling2D", + "keras.layers.GlobalMaxPool2D", ] ) class GlobalMaxPooling2D(BaseGlobalPooling): @@ -49,7 +49,7 @@ class GlobalMaxPooling2D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 4, 5, 3) - >>> y = keras_core.layers.GlobalMaxPooling2D()(x) + >>> y = keras.layers.GlobalMaxPooling2D()(x) >>> y.shape (2, 3) """ diff --git a/keras_core/layers/pooling/global_max_pooling3d.py b/keras/layers/pooling/global_max_pooling3d.py similarity index 87% rename from keras_core/layers/pooling/global_max_pooling3d.py rename to keras/layers/pooling/global_max_pooling3d.py index 8ede80baf..fb4598356 100644 --- a/keras_core/layers/pooling/global_max_pooling3d.py +++ b/keras/layers/pooling/global_max_pooling3d.py @@ -1,12 +1,12 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling +from keras import ops +from keras.api_export import keras_export +from keras.layers.pooling.base_global_pooling import BaseGlobalPooling -@keras_core_export( +@keras_export( [ - "keras_core.layers.GlobalMaxPooling3D", - "keras_core.layers.GlobalMaxPool3D", + "keras.layers.GlobalMaxPooling3D", + "keras.layers.GlobalMaxPool3D", ] ) class GlobalMaxPooling3D(BaseGlobalPooling): @@ -50,7 +50,7 @@ class GlobalMaxPooling3D(BaseGlobalPooling): Example: >>> x = np.random.rand(2, 4, 5, 4, 3) - >>> y = keras_core.layers.GlobalMaxPooling3D()(x) + >>> y = keras.layers.GlobalMaxPooling3D()(x) >>> y.shape (2, 3) """ diff --git a/keras_core/layers/pooling/global_max_pooling_test.py b/keras/layers/pooling/global_max_pooling_test.py similarity index 98% rename from keras_core/layers/pooling/global_max_pooling_test.py rename to keras/layers/pooling/global_max_pooling_test.py index 0d1dd5904..6b413fd17 100644 --- a/keras_core/layers/pooling/global_max_pooling_test.py 
+++ b/keras/layers/pooling/global_max_pooling_test.py @@ -2,8 +2,8 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing @pytest.mark.requires_trainable_backend diff --git a/keras_core/layers/pooling/max_pooling1d.py b/keras/layers/pooling/max_pooling1d.py similarity index 88% rename from keras_core/layers/pooling/max_pooling1d.py rename to keras/layers/pooling/max_pooling1d.py index acd141c38..f2a40e631 100644 --- a/keras_core/layers/pooling/max_pooling1d.py +++ b/keras/layers/pooling/max_pooling1d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - ["keras_core.layers.MaxPooling1D", "keras_core.layers.MaxPool1D"] +@keras_export( + ["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"] ) class MaxPooling1D(BasePooling): """Max pooling operation for 1D temporal data. @@ -51,7 +51,7 @@ class MaxPooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2, + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, ... strides=1, padding="valid") >>> max_pool_1d(x) @@ -59,7 +59,7 @@ class MaxPooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2, + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, ... strides=2, padding="valid") >>> max_pool_1d(x) @@ -67,7 +67,7 @@ class MaxPooling1D(BasePooling): >>> x = np.array([1., 2., 3., 4., 5.]) >>> x = np.reshape(x, [1, 5, 1]) - >>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2, + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, ... 
strides=1, padding="same") >>> max_pool_1d(x) """ diff --git a/keras_core/layers/pooling/max_pooling2d.py b/keras/layers/pooling/max_pooling2d.py similarity index 89% rename from keras_core/layers/pooling/max_pooling2d.py rename to keras/layers/pooling/max_pooling2d.py index 9111120bc..b5299336b 100644 --- a/keras_core/layers/pooling/max_pooling2d.py +++ b/keras/layers/pooling/max_pooling2d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - ["keras_core.layers.MaxPooling2D", "keras_core.layers.MaxPool2D"] +@keras_export( + ["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"] ) class MaxPooling2D(BasePooling): """Max pooling operation for 2D spatial data. @@ -63,7 +63,7 @@ class MaxPooling2D(BasePooling): ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = np.reshape(x, [1, 3, 3, 1]) - >>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2), + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding="valid") >>> max_pool_2d(x) @@ -73,7 +73,7 @@ class MaxPooling2D(BasePooling): ... [5., 6., 7., 8.], ... [9., 10., 11., 12.]]) >>> x = np.reshape(x, [1, 3, 4, 1]) - >>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2), + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(2, 2), padding="valid") >>> max_pool_2d(x) @@ -83,7 +83,7 @@ class MaxPooling2D(BasePooling): ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = np.reshape(x, [1, 3, 3, 1]) - >>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2), + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), ... 
strides=(1, 1), padding="same") >>> max_pool_2d(x) """ diff --git a/keras_core/layers/pooling/max_pooling3d.py b/keras/layers/pooling/max_pooling3d.py similarity index 89% rename from keras_core/layers/pooling/max_pooling3d.py rename to keras/layers/pooling/max_pooling3d.py index 56dea39b0..7cfcd6308 100644 --- a/keras_core/layers/pooling/max_pooling3d.py +++ b/keras/layers/pooling/max_pooling3d.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.pooling.base_pooling import BasePooling +from keras.api_export import keras_export +from keras.layers.pooling.base_pooling import BasePooling -@keras_core_export( - ["keras_core.layers.MaxPooling3D", "keras_core.layers.MaxPool3D"] +@keras_export( + ["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"] ) class MaxPooling3D(BasePooling): """Max pooling operation for 3D data (spatial or spatio-temporal). @@ -58,8 +58,8 @@ class MaxPooling3D(BasePooling): width = 30 channels = 3 - inputs = keras_core.layers.Input(shape=(depth, height, width, channels)) - layer = keras_core.layers.MaxPooling3D(pool_size=3) + inputs = keras.layers.Input(shape=(depth, height, width, channels)) + layer = keras.layers.MaxPooling3D(pool_size=3) outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3) ``` """ diff --git a/keras_core/layers/pooling/max_pooling_test.py b/keras/layers/pooling/max_pooling_test.py similarity index 99% rename from keras_core/layers/pooling/max_pooling_test.py rename to keras/layers/pooling/max_pooling_test.py index 37b86caa3..7f8dd437a 100644 --- a/keras_core/layers/pooling/max_pooling_test.py +++ b/keras/layers/pooling/max_pooling_test.py @@ -3,8 +3,8 @@ import pytest from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing @pytest.mark.requires_trainable_backend diff --git a/keras_core/layers/preprocessing/__init__.py 
b/keras/layers/preprocessing/__init__.py similarity index 100% rename from keras_core/layers/preprocessing/__init__.py rename to keras/layers/preprocessing/__init__.py diff --git a/keras_core/layers/preprocessing/category_encoding.py b/keras/layers/preprocessing/category_encoding.py similarity index 92% rename from keras_core/layers/preprocessing/category_encoding.py rename to keras/layers/preprocessing/category_encoding.py index 44c2aa55f..56094e688 100644 --- a/keras_core/layers/preprocessing/category_encoding.py +++ b/keras/layers/preprocessing/category_encoding.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.utils import backend_utils +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.utils import backend_utils -@keras_core_export("keras_core.layers.CategoryEncoding") +@keras_export("keras.layers.CategoryEncoding") class CategoryEncoding(TFDataLayer): """A preprocessing layer which encodes integer features. @@ -12,7 +12,7 @@ class CategoryEncoding(TFDataLayer): when the total number of tokens are known in advance. It accepts integer values as inputs, and it outputs a dense or sparse representation of those inputs. For integer inputs where the total number of tokens is not known, - use `keras_core.layers.IntegerLookup` instead. + use `keras.layers.IntegerLookup` instead. **Note:** This layer is safe to use inside a `tf.data` pipeline (independently of which backend you're using). @@ -21,7 +21,7 @@ class CategoryEncoding(TFDataLayer): **One-hot encoding data** - >>> layer = keras_core.layers.CategoryEncoding( + >>> layer = keras.layers.CategoryEncoding( ... 
num_tokens=4, output_mode="one_hot") >>> layer([3, 2, 0, 1]) array([[0., 0., 0., 1.], @@ -31,7 +31,7 @@ class CategoryEncoding(TFDataLayer): **Multi-hot encoding data** - >>> layer = keras_core.layers.CategoryEncoding( + >>> layer = keras.layers.CategoryEncoding( ... num_tokens=4, output_mode="multi_hot") >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]]) array([[1., 1., 0., 0.], @@ -41,7 +41,7 @@ class CategoryEncoding(TFDataLayer): **Using weighted inputs in `"count"` mode** - >>> layer = keras_core.layers.CategoryEncoding( + >>> layer = keras.layers.CategoryEncoding( ... num_tokens=4, output_mode="count") >>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]]) >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights) diff --git a/keras_core/layers/preprocessing/category_encoding_test.py b/keras/layers/preprocessing/category_encoding_test.py similarity index 98% rename from keras_core/layers/preprocessing/category_encoding_test.py rename to keras/layers/preprocessing/category_encoding_test.py index 594aeba39..236534300 100644 --- a/keras_core/layers/preprocessing/category_encoding_test.py +++ b/keras/layers/preprocessing/category_encoding_test.py @@ -1,8 +1,8 @@ import numpy as np from tensorflow import data as tf_data -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class CategoryEncodingTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/center_crop.py b/keras/layers/preprocessing/center_crop.py similarity index 95% rename from keras_core/layers/preprocessing/center_crop.py rename to keras/layers/preprocessing/center_crop.py index de3c5fee9..4112d7022 100644 --- a/keras_core/layers/preprocessing/center_crop.py +++ b/keras/layers/preprocessing/center_crop.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.utils 
import image_utils +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.utils import image_utils -@keras_core_export("keras_core.layers.CenterCrop") +@keras_export("keras.layers.CenterCrop") class CenterCrop(TFDataLayer): """A preprocessing layer which crops images. diff --git a/keras_core/layers/preprocessing/center_crop_test.py b/keras/layers/preprocessing/center_crop_test.py similarity index 98% rename from keras_core/layers/preprocessing/center_crop_test.py rename to keras/layers/preprocessing/center_crop_test.py index 71e0e930f..888737ec9 100644 --- a/keras_core/layers/preprocessing/center_crop_test.py +++ b/keras/layers/preprocessing/center_crop_test.py @@ -3,8 +3,8 @@ import pytest from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class CenterCropTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/preprocessing/discretization.py b/keras/layers/preprocessing/discretization.py similarity index 97% rename from keras_core/layers/preprocessing/discretization.py rename to keras/layers/preprocessing/discretization.py index 420e487a7..21627d6fb 100644 --- a/keras_core/layers/preprocessing/discretization.py +++ b/keras/layers/preprocessing/discretization.py @@ -1,14 +1,14 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.utils import argument_validation -from keras_core.utils import numerical_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.utils import argument_validation +from keras.utils import 
numerical_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.Discretization") +@keras_export("keras.layers.Discretization") class Discretization(TFDataLayer): """A preprocessing layer which buckets continuous features by ranges. diff --git a/keras_core/layers/preprocessing/discretization_test.py b/keras/layers/preprocessing/discretization_test.py similarity index 96% rename from keras_core/layers/preprocessing/discretization_test.py rename to keras/layers/preprocessing/discretization_test.py index 3af625a1b..874b2de23 100644 --- a/keras_core/layers/preprocessing/discretization_test.py +++ b/keras/layers/preprocessing/discretization_test.py @@ -3,11 +3,11 @@ import os import numpy as np from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.saving import saving_api +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.saving import saving_api class DicretizationTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/feature_space.py b/keras/layers/preprocessing/feature_space.py similarity index 96% rename from keras_core/layers/preprocessing/feature_space.py rename to keras/layers/preprocessing/feature_space.py index 397be385f..d9835f174 100644 --- a/keras_core/layers/preprocessing/feature_space.py +++ b/keras/layers/preprocessing/feature_space.py @@ -1,14 +1,14 @@ import tree -from keras_core import backend -from keras_core import layers -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.saving import saving_lib -from keras_core.saving import serialization_lib -from keras_core.utils import backend_utils -from keras_core.utils.module_utils import tensorflow as tf -from keras_core.utils.naming import auto_name +from keras import backend +from keras import 
layers +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.saving import saving_lib +from keras.saving import serialization_lib +from keras.utils import backend_utils +from keras.utils.module_utils import tensorflow as tf +from keras.utils.naming import auto_name class Cross: @@ -69,7 +69,7 @@ class Feature: return cls(**config) -@keras_core_export("keras_core.utils.FeatureSpace") +@keras_export("keras.utils.FeatureSpace") class FeatureSpace(Layer): """One-stop utility for preprocessing and encoding structured data. @@ -106,11 +106,11 @@ class FeatureSpace(Layer): FeatureSpace.float(name=None) # Float values to be preprocessed via featurewise standardization - # (i.e. via a `keras_core.layers.Normalization` layer). + # (i.e. via a `keras.layers.Normalization` layer). FeatureSpace.float_normalized(name=None) # Float values to be preprocessed via linear rescaling - # (i.e. via a `keras_core.layers.Rescaling` layer). + # (i.e. via a `keras.layers.Rescaling` layer). FeatureSpace.float_rescaled(scale=1., offset=0., name=None) # Float values to be discretized. By default, the discrete @@ -184,8 +184,8 @@ class FeatureSpace(Layer): # Retrieve the corresponding encoded Keras tensors encoded_features = feature_space.get_encoded_features() # Build a Functional model - outputs = keras_core.layers.Dense(1, activation="sigmoid")(encoded_features) - model = keras_core.Model(inputs, outputs) + outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features) + model = keras.Model(inputs, outputs) ``` **Customizing each feature or feature cross:** @@ -256,7 +256,7 @@ class FeatureSpace(Layer): preprocessing_layer = feature_space.preprocessors["feature1"] # The crossing layer of each feature cross is available in `.crossers`. - # It's an instance of keras_core.layers.HashedCrossing. + # It's an instance of keras.layers.HashedCrossing. 
crossing_layer = feature_space.crossers["feature1_X_feature2"] ``` @@ -264,7 +264,7 @@ class FeatureSpace(Layer): ```python feature_space.save("featurespace.keras") - reloaded_feature_space = keras_core.models.load_model("featurespace.keras") + reloaded_feature_space = keras.models.load_model("featurespace.keras") ``` """ @@ -278,7 +278,7 @@ class FeatureSpace(Layer): @classmethod def float(cls, name=None): - from keras_core.layers.core import identity + from keras.layers.core import identity name = name or auto_name("float") preprocessor = identity.Identity( @@ -781,11 +781,11 @@ class FeatureSpace(Layer): def save(self, filepath): """Save the `FeatureSpace` instance to a `.keras` file. - You can reload it via `keras_core.models.load_model()`: + You can reload it via `keras.models.load_model()`: ```python feature_space.save("featurespace.keras") - reloaded_fs = keras_core.models.load_model("featurespace.keras") + reloaded_fs = keras.models.load_model("featurespace.keras") ``` """ saving_lib.save_model(self, filepath) diff --git a/keras_core/layers/preprocessing/feature_space_test.py b/keras/layers/preprocessing/feature_space_test.py similarity index 98% rename from keras_core/layers/preprocessing/feature_space_test.py rename to keras/layers/preprocessing/feature_space_test.py index 9616de341..bddcc6bce 100644 --- a/keras_core/layers/preprocessing/feature_space_test.py +++ b/keras/layers/preprocessing/feature_space_test.py @@ -3,13 +3,13 @@ import os import pytest from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import ops -from keras_core import testing -from keras_core.layers.preprocessing import feature_space -from keras_core.saving import saving_api +from keras import backend +from keras import layers +from keras import models +from keras import ops +from keras import testing +from keras.layers.preprocessing import feature_space +from keras.saving import 
saving_api class FeatureSpaceTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/hashed_crossing.py b/keras/layers/preprocessing/hashed_crossing.py similarity index 94% rename from keras_core/layers/preprocessing/hashed_crossing.py rename to keras/layers/preprocessing/hashed_crossing.py index 3d16941a0..a9d074543 100644 --- a/keras_core/layers/preprocessing/hashed_crossing.py +++ b/keras/layers/preprocessing/hashed_crossing.py @@ -1,13 +1,13 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation -from keras_core.utils import backend_utils -from keras_core.utils import tf_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.utils import argument_validation +from keras.utils import backend_utils +from keras.utils import tf_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.HashedCrossing") +@keras_export("keras.layers.HashedCrossing") class HashedCrossing(Layer): """A preprocessing layer which crosses features using the "hashing trick". @@ -47,7 +47,7 @@ class HashedCrossing(Layer): **Crossing two scalar features.** - >>> layer = keras_core.layers.HashedCrossing( + >>> layer = keras.layers.HashedCrossing( ... num_bins=5) >>> feat1 = np.array(['A', 'B', 'A', 'B', 'A']) >>> feat2 = np.array([101, 101, 101, 102, 102]) @@ -56,7 +56,7 @@ class HashedCrossing(Layer): **Crossing and one-hotting two scalar features.** - >>> layer = keras_core.layers.HashedCrossing( + >>> layer = keras.layers.HashedCrossing( ... 
num_bins=5, output_mode='one_hot') >>> feat1 = np.array(['A', 'B', 'A', 'B', 'A']) >>> feat2 = np.array([101, 101, 101, 102, 102]) diff --git a/keras_core/layers/preprocessing/hashed_crossing_test.py b/keras/layers/preprocessing/hashed_crossing_test.py similarity index 98% rename from keras_core/layers/preprocessing/hashed_crossing_test.py rename to keras/layers/preprocessing/hashed_crossing_test.py index beea24394..53a4c0390 100644 --- a/keras_core/layers/preprocessing/hashed_crossing_test.py +++ b/keras/layers/preprocessing/hashed_crossing_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest import tensorflow as tf -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class HashedCrossingTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/hashing.py b/keras/layers/preprocessing/hashing.py similarity index 94% rename from keras_core/layers/preprocessing/hashing.py rename to keras/layers/preprocessing/hashing.py index edff2e0f8..067068102 100644 --- a/keras_core/layers/preprocessing/hashing.py +++ b/keras/layers/preprocessing/hashing.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.utils import backend_utils -from keras_core.utils import tf_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.utils import backend_utils +from keras.utils import tf_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.Hashing") +@keras_export("keras.layers.Hashing") class Hashing(Layer): """A preprocessing layer which hashes and bins categorical features. 
@@ -38,7 +38,7 @@ class Hashing(Layer): **Example (FarmHash64)** - >>> layer = keras_core.layers.Hashing(num_bins=3) + >>> layer = keras.layers.Hashing(num_bins=3) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) array([[1], @@ -49,7 +49,7 @@ class Hashing(Layer): **Example (FarmHash64) with a mask value** - >>> layer = keras_core.layers.Hashing(num_bins=3, mask_value='') + >>> layer = keras.layers.Hashing(num_bins=3, mask_value='') >>> inp = [['A'], ['B'], [''], ['C'], ['D']] >>> layer(inp) array([[1], @@ -60,7 +60,7 @@ class Hashing(Layer): **Example (SipHash64)** - >>> layer = keras_core.layers.Hashing(num_bins=3, salt=[133, 137]) + >>> layer = keras.layers.Hashing(num_bins=3, salt=[133, 137]) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) array([[1], @@ -71,7 +71,7 @@ class Hashing(Layer): **Example (Siphash64 with a single integer, same as `salt=[133, 133]`)** - >>> layer = keras_core.layers.Hashing(num_bins=3, salt=133) + >>> layer = keras.layers.Hashing(num_bins=3, salt=133) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) array([[0], diff --git a/keras_core/layers/preprocessing/hashing_test.py b/keras/layers/preprocessing/hashing_test.py similarity index 99% rename from keras_core/layers/preprocessing/hashing_test.py rename to keras/layers/preprocessing/hashing_test.py index 3c5b45020..4cd5f7166 100644 --- a/keras_core/layers/preprocessing/hashing_test.py +++ b/keras/layers/preprocessing/hashing_test.py @@ -5,11 +5,11 @@ import pytest import tensorflow as tf from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.saving import load_model +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.saving import load_model class ArrayLike: diff --git a/keras_core/layers/preprocessing/index_lookup.py 
b/keras/layers/preprocessing/index_lookup.py similarity index 99% rename from keras_core/layers/preprocessing/index_lookup.py rename to keras/layers/preprocessing/index_lookup.py index fc6430411..7f9d3d2f3 100644 --- a/keras_core/layers/preprocessing/index_lookup.py +++ b/keras/layers/preprocessing/index_lookup.py @@ -2,11 +2,11 @@ import collections import numpy as np -from keras_core import backend -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation -from keras_core.utils import tf_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.layers.layer import Layer +from keras.utils import argument_validation +from keras.utils import tf_utils +from keras.utils.module_utils import tensorflow as tf class IndexLookup(Layer): diff --git a/keras_core/layers/preprocessing/index_lookup_test.py b/keras/layers/preprocessing/index_lookup_test.py similarity index 98% rename from keras_core/layers/preprocessing/index_lookup_test.py rename to keras/layers/preprocessing/index_lookup_test.py index 2692dc964..7bf596e41 100644 --- a/keras_core/layers/preprocessing/index_lookup_test.py +++ b/keras/layers/preprocessing/index_lookup_test.py @@ -5,11 +5,11 @@ import pytest from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.saving import saving_api +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.saving import saving_api @pytest.mark.skipif( diff --git a/keras_core/layers/preprocessing/integer_lookup.py b/keras/layers/preprocessing/integer_lookup.py similarity index 98% rename from keras_core/layers/preprocessing/integer_lookup.py rename to keras/layers/preprocessing/integer_lookup.py index d2d1654f8..b90c14790 100644 --- 
a/keras_core/layers/preprocessing/integer_lookup.py +++ b/keras/layers/preprocessing/integer_lookup.py @@ -1,13 +1,13 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.index_lookup import IndexLookup -from keras_core.utils import backend_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.index_lookup import IndexLookup +from keras.utils import backend_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.IntegerLookup") +@keras_export("keras.layers.IntegerLookup") class IntegerLookup(IndexLookup): """A preprocessing layer that maps integers to (possibly encoded) indices. diff --git a/keras_core/layers/preprocessing/integer_lookup_test.py b/keras/layers/preprocessing/integer_lookup_test.py similarity index 97% rename from keras_core/layers/preprocessing/integer_lookup_test.py rename to keras/layers/preprocessing/integer_lookup_test.py index 1c362a978..ede05bedf 100644 --- a/keras_core/layers/preprocessing/integer_lookup_test.py +++ b/keras/layers/preprocessing/integer_lookup_test.py @@ -1,9 +1,9 @@ import numpy as np from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class IntegerLookupTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/normalization.py b/keras/layers/preprocessing/normalization.py similarity index 96% rename from keras_core/layers/preprocessing/normalization.py rename to keras/layers/preprocessing/normalization.py index ce4de3cd2..76a4c3797 100644 --- a/keras_core/layers/preprocessing/normalization.py +++ b/keras/layers/preprocessing/normalization.py @@ -2,14 +2,14 @@ import math import numpy as np -from keras_core 
import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.Normalization") +@keras_export("keras.layers.Normalization") class Normalization(Layer): """A preprocessing layer that normalizes continuous features. @@ -54,7 +54,7 @@ class Normalization(Layer): >>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32') >>> input_data = np.array([1., 2., 3.], dtype='float32') - >>> layer = keras_core.layers.Normalization(axis=None) + >>> layer = keras.layers.Normalization(axis=None) >>> layer.adapt(adapt_data) >>> layer(input_data) array([-1.4142135, -0.70710677, 0.], dtype=float32) @@ -66,7 +66,7 @@ class Normalization(Layer): ... [0., 7., 4.], ... [2., 9., 6.]], dtype='float32') >>> input_data = np.array([[0., 7., 4.]], dtype='float32') - >>> layer = keras_core.layers.Normalization(axis=-1) + >>> layer = keras.layers.Normalization(axis=-1) >>> layer.adapt(adapt_data) >>> layer(input_data) array([-1., -1., -1.], dtype=float32) @@ -74,7 +74,7 @@ class Normalization(Layer): Pass the mean and variance directly. >>> input_data = np.array([[1.], [2.], [3.]], dtype='float32') - >>> layer = keras_core.layers.Normalization(mean=3., variance=2.) + >>> layer = keras.layers.Normalization(mean=3., variance=2.) >>> layer(input_data) array([[-1.4142135 ], [-0.70710677], @@ -87,7 +87,7 @@ class Normalization(Layer): ... [0., 7., 4.], ... 
[2., 9., 6.]], dtype='float32') >>> input_data = np.array([[1., 2., 3.]], dtype='float32') - >>> layer = keras_core.layers.Normalization(axis=-1, invert=True) + >>> layer = keras.layers.Normalization(axis=-1, invert=True) >>> layer.adapt(adapt_data) >>> layer(input_data) array([2., 10., 8.], dtype=float32) diff --git a/keras_core/layers/preprocessing/normalization_test.py b/keras/layers/preprocessing/normalization_test.py similarity index 96% rename from keras_core/layers/preprocessing/normalization_test.py rename to keras/layers/preprocessing/normalization_test.py index e90c684b5..c0c402709 100644 --- a/keras_core/layers/preprocessing/normalization_test.py +++ b/keras/layers/preprocessing/normalization_test.py @@ -3,9 +3,9 @@ import pytest from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class NormalizationTest(testing.TestCase, parameterized.TestCase): @@ -100,7 +100,7 @@ class NormalizationTest(testing.TestCase, parameterized.TestCase): reason="Test symbolic call for torch meta device.", ) def test_call_on_meta_device_after_built(self): - from keras_core.backend.torch import core + from keras.backend.torch import core layer = layers.Normalization() data = np.random.random((32, 4)) diff --git a/keras_core/layers/preprocessing/random_brightness.py b/keras/layers/preprocessing/random_brightness.py similarity index 95% rename from keras_core/layers/preprocessing/random_brightness.py rename to keras/layers/preprocessing/random_brightness.py index 7d02ef954..74354886d 100644 --- a/keras_core/layers/preprocessing/random_brightness.py +++ b/keras/layers/preprocessing/random_brightness.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import 
SeedGenerator +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator -@keras_core_export("keras_core.layers.RandomBrightness") +@keras_export("keras.layers.RandomBrightness") class RandomBrightness(TFDataLayer): """A preprocessing layer which randomly adjusts brightness during training. @@ -44,7 +44,7 @@ class RandomBrightness(TFDataLayer): Sample usage: ```python - random_bright = keras_core.layers.RandomBrightness(factor=0.2) + random_bright = keras.layers.RandomBrightness(factor=0.2) # An image with shape [2, 2, 3] image = [[[1, 2, 3], [4 ,5 ,6]], [[7, 8, 9], [10, 11, 12]]] diff --git a/keras_core/layers/preprocessing/random_brightness_test.py b/keras/layers/preprocessing/random_brightness_test.py similarity index 95% rename from keras_core/layers/preprocessing/random_brightness_test.py rename to keras/layers/preprocessing/random_brightness_test.py index 2081f1a9b..1044d28ea 100644 --- a/keras_core/layers/preprocessing/random_brightness_test.py +++ b/keras/layers/preprocessing/random_brightness_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RandomBrightnessTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/random_contrast.py b/keras/layers/preprocessing/random_contrast.py similarity index 93% rename from keras_core/layers/preprocessing/random_contrast.py rename to keras/layers/preprocessing/random_contrast.py index d98938aec..c9d52727b 100644 --- a/keras_core/layers/preprocessing/random_contrast.py +++ b/keras/layers/preprocessing/random_contrast.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from 
keras_core.random.seed_generator import SeedGenerator +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator -@keras_core_export("keras_core.layers.RandomContrast") +@keras_export("keras.layers.RandomContrast") class RandomContrast(TFDataLayer): """A preprocessing layer which randomly adjusts contrast during training. diff --git a/keras_core/layers/preprocessing/random_contrast_test.py b/keras/layers/preprocessing/random_contrast_test.py similarity index 94% rename from keras_core/layers/preprocessing/random_contrast_test.py rename to keras/layers/preprocessing/random_contrast_test.py index f0b402442..95a9d1d85 100644 --- a/keras_core/layers/preprocessing/random_contrast_test.py +++ b/keras/layers/preprocessing/random_contrast_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RandomContrastTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/random_crop.py b/keras/layers/preprocessing/random_crop.py similarity index 95% rename from keras_core/layers/preprocessing/random_crop.py rename to keras/layers/preprocessing/random_crop.py index c1422b5a8..6dfcc5b30 100644 --- a/keras_core/layers/preprocessing/random_crop.py +++ b/keras/layers/preprocessing/random_crop.py @@ -1,11 +1,11 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import SeedGenerator -from keras_core.utils import image_utils +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator 
+from keras.utils import image_utils -@keras_core_export("keras_core.layers.RandomCrop") +@keras_export("keras.layers.RandomCrop") class RandomCrop(TFDataLayer): """A preprocessing layer which randomly crops images during training. diff --git a/keras_core/layers/preprocessing/random_crop_test.py b/keras/layers/preprocessing/random_crop_test.py similarity index 97% rename from keras_core/layers/preprocessing/random_crop_test.py rename to keras/layers/preprocessing/random_crop_test.py index 27fefb37b..a8982e8dc 100644 --- a/keras_core/layers/preprocessing/random_crop_test.py +++ b/keras/layers/preprocessing/random_crop_test.py @@ -1,8 +1,8 @@ import numpy as np from tensorflow import data as tf_data -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class RandomCropTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/random_flip.py b/keras/layers/preprocessing/random_flip.py similarity index 93% rename from keras_core/layers/preprocessing/random_flip.py rename to keras/layers/preprocessing/random_flip.py index b08bb4448..df0127432 100644 --- a/keras_core/layers/preprocessing/random_flip.py +++ b/keras/layers/preprocessing/random_flip.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import SeedGenerator +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator HORIZONTAL = "horizontal" VERTICAL = "vertical" HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical" -@keras_core_export("keras_core.layers.RandomFlip") +@keras_export("keras.layers.RandomFlip") class RandomFlip(TFDataLayer): """A preprocessing layer which randomly flips images during training. 
diff --git a/keras_core/layers/preprocessing/random_flip_test.py b/keras/layers/preprocessing/random_flip_test.py similarity index 97% rename from keras_core/layers/preprocessing/random_flip_test.py rename to keras/layers/preprocessing/random_flip_test.py index 1d808b4c0..b143979aa 100644 --- a/keras_core/layers/preprocessing/random_flip_test.py +++ b/keras/layers/preprocessing/random_flip_test.py @@ -4,10 +4,10 @@ import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing -from keras_core import utils +from keras import backend +from keras import layers +from keras import testing +from keras import utils class MockedRandomFlip(layers.RandomFlip): diff --git a/keras_core/layers/preprocessing/random_rotation.py b/keras/layers/preprocessing/random_rotation.py similarity index 97% rename from keras_core/layers/preprocessing/random_rotation.py rename to keras/layers/preprocessing/random_rotation.py index 37b14fc14..4004834c3 100644 --- a/keras_core/layers/preprocessing/random_rotation.py +++ b/keras/layers/preprocessing/random_rotation.py @@ -1,12 +1,12 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import SeedGenerator +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator -@keras_core_export("keras_core.layers.RandomRotation") +@keras_export("keras.layers.RandomRotation") class RandomRotation(TFDataLayer): """A preprocessing layer which randomly rotates images during training. 
diff --git a/keras_core/layers/preprocessing/random_rotation_test.py b/keras/layers/preprocessing/random_rotation_test.py similarity index 96% rename from keras_core/layers/preprocessing/random_rotation_test.py rename to keras/layers/preprocessing/random_rotation_test.py index e719a1fe4..8dc4a1042 100644 --- a/keras_core/layers/preprocessing/random_rotation_test.py +++ b/keras/layers/preprocessing/random_rotation_test.py @@ -2,9 +2,9 @@ import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RandomRotationTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/preprocessing/random_translation.py b/keras/layers/preprocessing/random_translation.py similarity index 97% rename from keras_core/layers/preprocessing/random_translation.py rename to keras/layers/preprocessing/random_translation.py index a250382fb..33cbd8e85 100644 --- a/keras_core/layers/preprocessing/random_translation.py +++ b/keras/layers/preprocessing/random_translation.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import SeedGenerator +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator -@keras_core_export("keras_core.layers.RandomTranslation") +@keras_export("keras.layers.RandomTranslation") class RandomTranslation(TFDataLayer): """A preprocessing layer which randomly translates images during training. 
diff --git a/keras_core/layers/preprocessing/random_translation_test.py b/keras/layers/preprocessing/random_translation_test.py similarity index 99% rename from keras_core/layers/preprocessing/random_translation_test.py rename to keras/layers/preprocessing/random_translation_test.py index 4a3a4ad57..7545dd96f 100644 --- a/keras_core/layers/preprocessing/random_translation_test.py +++ b/keras/layers/preprocessing/random_translation_test.py @@ -2,9 +2,9 @@ import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RandomTranslationTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/preprocessing/random_zoom.py b/keras/layers/preprocessing/random_zoom.py similarity index 97% rename from keras_core/layers/preprocessing/random_zoom.py rename to keras/layers/preprocessing/random_zoom.py index d534eab9e..454325d94 100644 --- a/keras_core/layers/preprocessing/random_zoom.py +++ b/keras/layers/preprocessing/random_zoom.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.random.seed_generator import SeedGenerator +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.random.seed_generator import SeedGenerator -@keras_core_export("keras_core.layers.RandomZoom") +@keras_export("keras.layers.RandomZoom") class RandomZoom(TFDataLayer): """A preprocessing layer which randomly zooms images during training. 
@@ -83,7 +83,7 @@ class RandomZoom(TFDataLayer): Example: >>> input_img = np.random.random((32, 224, 224, 3)) - >>> layer = keras_core.layers.RandomZoom(.5, .2) + >>> layer = keras.layers.RandomZoom(.5, .2) >>> out_img = layer(input_img) """ diff --git a/keras_core/layers/preprocessing/random_zoom_test.py b/keras/layers/preprocessing/random_zoom_test.py similarity index 97% rename from keras_core/layers/preprocessing/random_zoom_test.py rename to keras/layers/preprocessing/random_zoom_test.py index 49514b91b..970b0c9c0 100644 --- a/keras_core/layers/preprocessing/random_zoom_test.py +++ b/keras/layers/preprocessing/random_zoom_test.py @@ -2,9 +2,9 @@ import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RandomZoomTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/preprocessing/rescaling.py b/keras/layers/preprocessing/rescaling.py similarity index 90% rename from keras_core/layers/preprocessing/rescaling.py rename to keras/layers/preprocessing/rescaling.py index 8772ce741..78046fde9 100644 --- a/keras_core/layers/preprocessing/rescaling.py +++ b/keras/layers/preprocessing/rescaling.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer -@keras_core_export("keras_core.layers.Rescaling") +@keras_export("keras.layers.Rescaling") class Rescaling(TFDataLayer): """A preprocessing layer which rescales input values to a new range. 
diff --git a/keras_core/layers/preprocessing/rescaling_test.py b/keras/layers/preprocessing/rescaling_test.py similarity index 96% rename from keras_core/layers/preprocessing/rescaling_test.py rename to keras/layers/preprocessing/rescaling_test.py index 8d4ffb4bd..34ae51714 100644 --- a/keras_core/layers/preprocessing/rescaling_test.py +++ b/keras/layers/preprocessing/rescaling_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class RescalingTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/resizing.py b/keras/layers/preprocessing/resizing.py similarity index 94% rename from keras_core/layers/preprocessing/resizing.py rename to keras/layers/preprocessing/resizing.py index c967af45b..6a1475e22 100644 --- a/keras_core/layers/preprocessing/resizing.py +++ b/keras/layers/preprocessing/resizing.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer -from keras_core.utils import image_utils +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.utils import image_utils -@keras_core_export("keras_core.layers.Resizing") +@keras_export("keras.layers.Resizing") class Resizing(TFDataLayer): """A preprocessing layer which resizes images. 
diff --git a/keras_core/layers/preprocessing/resizing_test.py b/keras/layers/preprocessing/resizing_test.py similarity index 97% rename from keras_core/layers/preprocessing/resizing_test.py rename to keras/layers/preprocessing/resizing_test.py index 1a7ea79f6..31cc19b05 100644 --- a/keras_core/layers/preprocessing/resizing_test.py +++ b/keras/layers/preprocessing/resizing_test.py @@ -3,10 +3,10 @@ import pytest from absl.testing import parameterized from tensorflow import data as tf_data -from keras_core import Sequential -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import Sequential +from keras import backend +from keras import layers +from keras import testing class ResizingTest(testing.TestCase, parameterized.TestCase): @@ -190,7 +190,7 @@ class ResizingTest(testing.TestCase, parameterized.TestCase): ) def test_tf_data_compatibility_sequential(self): # Test compatibility when wrapping in a Sequential - # https://github.com/keras-team/keras-core/issues/347 + # https://github.com/keras-team/keras/issues/347 layer = layers.Resizing(8, 9) input_data = np.random.random((2, 10, 12, 3)) ds = ( diff --git a/keras_core/layers/preprocessing/string_lookup.py b/keras/layers/preprocessing/string_lookup.py similarity index 97% rename from keras_core/layers/preprocessing/string_lookup.py rename to keras/layers/preprocessing/string_lookup.py index 02bc9c546..515255592 100644 --- a/keras_core/layers/preprocessing/string_lookup.py +++ b/keras/layers/preprocessing/string_lookup.py @@ -1,20 +1,20 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.preprocessing.index_lookup import IndexLookup -from keras_core.utils import backend_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.preprocessing.index_lookup import IndexLookup +from 
keras.utils import backend_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.StringLookup") +@keras_export("keras.layers.StringLookup") class StringLookup(IndexLookup): """A preprocessing layer that maps strings to (possibly encoded) indices. This layer translates a set of arbitrary strings into integer output via a table-based vocabulary lookup. This layer will perform no splitting or transformation of input strings. For a layer than can split and tokenize - natural language, see the `keras_core.layers.TextVectorization` layer. + natural language, see the `keras.layers.TextVectorization` layer. The vocabulary for the layer must be either supplied on construction or learned via `adapt()`. During `adapt()`, the layer will analyze a data set, diff --git a/keras_core/layers/preprocessing/string_lookup_test.py b/keras/layers/preprocessing/string_lookup_test.py similarity index 95% rename from keras_core/layers/preprocessing/string_lookup_test.py rename to keras/layers/preprocessing/string_lookup_test.py index 5a33d890b..0f33f3d3b 100644 --- a/keras_core/layers/preprocessing/string_lookup_test.py +++ b/keras/layers/preprocessing/string_lookup_test.py @@ -1,9 +1,9 @@ import numpy as np from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class StringLookupTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/text_vectorization.py b/keras/layers/preprocessing/text_vectorization.py similarity index 97% rename from keras_core/layers/preprocessing/text_vectorization.py rename to keras/layers/preprocessing/text_vectorization.py index c15aadd37..a8e2ea21f 100644 --- a/keras_core/layers/preprocessing/text_vectorization.py +++ b/keras/layers/preprocessing/text_vectorization.py @@ -1,18 +1,18 @@ import numpy as np -from keras_core import backend -from 
keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.layers.preprocessing.index_lookup import listify_tensors -from keras_core.layers.preprocessing.string_lookup import StringLookup -from keras_core.saving import serialization_lib -from keras_core.utils import argument_validation -from keras_core.utils import backend_utils -from keras_core.utils import tf_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.layers.preprocessing.index_lookup import listify_tensors +from keras.layers.preprocessing.string_lookup import StringLookup +from keras.saving import serialization_lib +from keras.utils import argument_validation +from keras.utils import backend_utils +from keras.utils import tf_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.layers.TextVectorization") +@keras_export("keras.layers.TextVectorization") class TextVectorization(Layer): """A preprocessing layer which maps text features to integer sequences. @@ -46,7 +46,7 @@ class TextVectorization(Layer): 1. Any callable can be passed to this Layer, but if you want to serialize this object you should only pass functions that are registered Keras - serializables (see `keras_core.saving.register_keras_serializable` + serializables (see `keras.saving.register_keras_serializable` for more details). 2. When using a custom callable for `standardize`, the data received by the callable will be exactly as passed to this layer. The callable @@ -186,7 +186,7 @@ class TextVectorization(Layer): >>> # Create the layer, passing the vocab directly. You can also pass the >>> # vocabulary arg a path to a file containing one vocabulary word per >>> # line. - >>> vectorize_layer = keras_core.layers.TextVectorization( + >>> vectorize_layer = keras.layers.TextVectorization( ... max_tokens=max_tokens, ... 
output_mode='int', ... output_sequence_length=max_len, diff --git a/keras_core/layers/preprocessing/text_vectorization_test.py b/keras/layers/preprocessing/text_vectorization_test.py similarity index 96% rename from keras_core/layers/preprocessing/text_vectorization_test.py rename to keras/layers/preprocessing/text_vectorization_test.py index 99caa0e90..11078a8d1 100644 --- a/keras_core/layers/preprocessing/text_vectorization_test.py +++ b/keras/layers/preprocessing/text_vectorization_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from tensorflow import data as tf_data -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing +from keras import backend +from keras import layers +from keras import models +from keras import testing class TextVectorizationTest(testing.TestCase): diff --git a/keras_core/layers/preprocessing/tf_data_layer.py b/keras/layers/preprocessing/tf_data_layer.py similarity index 84% rename from keras_core/layers/preprocessing/tf_data_layer.py rename to keras/layers/preprocessing/tf_data_layer.py index b93f90bbe..3659153df 100644 --- a/keras_core/layers/preprocessing/tf_data_layer.py +++ b/keras/layers/preprocessing/tf_data_layer.py @@ -1,10 +1,10 @@ import tree -import keras_core.backend -from keras_core.layers.layer import Layer -from keras_core.random.seed_generator import SeedGenerator -from keras_core.utils import backend_utils -from keras_core.utils import tracking +import keras.backend +from keras.layers.layer import Layer +from keras.random.seed_generator import SeedGenerator +from keras.utils import backend_utils +from keras.utils import tracking class TFDataLayer(Layer): @@ -22,7 +22,7 @@ class TFDataLayer(Layer): def __call__(self, inputs, **kwargs): if backend_utils.in_tf_graph() and not isinstance( - inputs, keras_core.KerasTensor + inputs, keras.KerasTensor ): # We're in a TF graph, e.g. a tf.data pipeline. 
self.backend.set_backend("tensorflow") @@ -47,7 +47,7 @@ class TFDataLayer(Layer): @tracking.no_automatic_dependency_tracking def _get_seed_generator(self, backend=None): - if backend is None or backend == keras_core.backend.backend(): + if backend is None or backend == keras.backend.backend(): return self.generator if not hasattr(self, "_backend_generators"): self._backend_generators = {} diff --git a/keras_core/layers/regularization/__init__.py b/keras/layers/regularization/__init__.py similarity index 100% rename from keras_core/layers/regularization/__init__.py rename to keras/layers/regularization/__init__.py diff --git a/keras_core/layers/regularization/activity_regularization.py b/keras/layers/regularization/activity_regularization.py similarity index 84% rename from keras_core/layers/regularization/activity_regularization.py rename to keras/layers/regularization/activity_regularization.py index 28bdca9ce..c3908ec74 100644 --- a/keras_core/layers/regularization/activity_regularization.py +++ b/keras/layers/regularization/activity_regularization.py @@ -1,9 +1,9 @@ -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.ActivityRegularization") +@keras_export("keras.layers.ActivityRegularization") class ActivityRegularization(Layer): """Layer that applies an update to the cost function based input activity. 
diff --git a/keras_core/layers/regularization/activity_regularization_test.py b/keras/layers/regularization/activity_regularization_test.py similarity index 92% rename from keras_core/layers/regularization/activity_regularization_test.py rename to keras/layers/regularization/activity_regularization_test.py index 075d5c19f..bc9263559 100644 --- a/keras_core/layers/regularization/activity_regularization_test.py +++ b/keras/layers/regularization/activity_regularization_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import layers -from keras_core.testing import test_case +from keras import layers +from keras.testing import test_case class ActivityRegularizationTest(test_case.TestCase): diff --git a/keras_core/layers/regularization/dropout.py b/keras/layers/regularization/dropout.py similarity index 94% rename from keras_core/layers/regularization/dropout.py rename to keras/layers/regularization/dropout.py index 02187d05b..33c5fb79f 100644 --- a/keras_core/layers/regularization/dropout.py +++ b/keras/layers/regularization/dropout.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Dropout") +@keras_export("keras.layers.Dropout") class Dropout(Layer): """Applies dropout to the input. 
diff --git a/keras_core/layers/regularization/dropout_test.py b/keras/layers/regularization/dropout_test.py similarity index 94% rename from keras_core/layers/regularization/dropout_test.py rename to keras/layers/regularization/dropout_test.py index f9fe4545c..647edcf1d 100644 --- a/keras_core/layers/regularization/dropout_test.py +++ b/keras/layers/regularization/dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class DropoutTest(testing.TestCase): diff --git a/keras_core/layers/regularization/gaussian_dropout.py b/keras/layers/regularization/gaussian_dropout.py similarity index 90% rename from keras_core/layers/regularization/gaussian_dropout.py rename to keras/layers/regularization/gaussian_dropout.py index 198e185ae..15f66781c 100644 --- a/keras_core/layers/regularization/gaussian_dropout.py +++ b/keras/layers/regularization/gaussian_dropout.py @@ -1,12 +1,12 @@ import math -from keras_core import backend -from keras_core import layers -from keras_core import ops -from keras_core.api_export import keras_core_export +from keras import backend +from keras import layers +from keras import ops +from keras.api_export import keras_export -@keras_core_export("keras_core.layers.GaussianDropout") +@keras_export("keras.layers.GaussianDropout") class GaussianDropout(layers.Layer): """Apply multiplicative 1-centered Gaussian noise. 
diff --git a/keras_core/layers/regularization/gaussian_dropout_test.py b/keras/layers/regularization/gaussian_dropout_test.py similarity index 91% rename from keras_core/layers/regularization/gaussian_dropout_test.py rename to keras/layers/regularization/gaussian_dropout_test.py index 852db925d..8e88b39ff 100644 --- a/keras_core/layers/regularization/gaussian_dropout_test.py +++ b/keras/layers/regularization/gaussian_dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class GaussianDropoutTest(testing.TestCase): diff --git a/keras_core/layers/regularization/gaussian_noise.py b/keras/layers/regularization/gaussian_noise.py similarity index 90% rename from keras_core/layers/regularization/gaussian_noise.py rename to keras/layers/regularization/gaussian_noise.py index 311dbb48a..36d168e04 100644 --- a/keras_core/layers/regularization/gaussian_noise.py +++ b/keras/layers/regularization/gaussian_noise.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import layers -from keras_core import ops -from keras_core.api_export import keras_core_export +from keras import backend +from keras import layers +from keras import ops +from keras.api_export import keras_export -@keras_core_export("keras_core.layers.GaussianNoise") +@keras_export("keras.layers.GaussianNoise") class GaussianNoise(layers.Layer): """Apply additive zero-centered Gaussian noise. 
diff --git a/keras_core/layers/regularization/gaussian_noise_test.py b/keras/layers/regularization/gaussian_noise_test.py similarity index 90% rename from keras_core/layers/regularization/gaussian_noise_test.py rename to keras/layers/regularization/gaussian_noise_test.py index 27066dbb4..63aa4c6b6 100644 --- a/keras_core/layers/regularization/gaussian_noise_test.py +++ b/keras/layers/regularization/gaussian_noise_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class GaussianNoiseTest(testing.TestCase): diff --git a/keras_core/layers/regularization/spatial_dropout.py b/keras/layers/regularization/spatial_dropout.py similarity index 94% rename from keras_core/layers/regularization/spatial_dropout.py rename to keras/layers/regularization/spatial_dropout.py index 41bcdd9a2..c7ac9a3d0 100644 --- a/keras_core/layers/regularization/spatial_dropout.py +++ b/keras/layers/regularization/spatial_dropout.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.regularization.dropout import Dropout +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.regularization.dropout import Dropout class BaseSpatialDropout(Dropout): @@ -28,7 +28,7 @@ class BaseSpatialDropout(Dropout): } -@keras_core_export("keras_core.layers.SpatialDropout1D") +@keras_export("keras.layers.SpatialDropout1D") class SpatialDropout1D(BaseSpatialDropout): """Spatial 1D version of Dropout. 
@@ -68,7 +68,7 @@ class SpatialDropout1D(BaseSpatialDropout): return (input_shape[0], 1, input_shape[2]) -@keras_core_export("keras_core.layers.SpatialDropout2D") +@keras_export("keras.layers.SpatialDropout2D") class SpatialDropout2D(BaseSpatialDropout): """Spatial 2D version of Dropout. @@ -130,7 +130,7 @@ class SpatialDropout2D(BaseSpatialDropout): return {**base_config, **config} -@keras_core_export("keras_core.layers.SpatialDropout3D") +@keras_export("keras.layers.SpatialDropout3D") class SpatialDropout3D(BaseSpatialDropout): """Spatial 3D version of Dropout. diff --git a/keras_core/layers/regularization/spatial_dropout_test.py b/keras/layers/regularization/spatial_dropout_test.py similarity index 97% rename from keras_core/layers/regularization/spatial_dropout_test.py rename to keras/layers/regularization/spatial_dropout_test.py index a6681621b..9bcdb1faa 100644 --- a/keras_core/layers/regularization/spatial_dropout_test.py +++ b/keras/layers/regularization/spatial_dropout_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras_core import layers -from keras_core.testing import test_case +from keras import layers +from keras.testing import test_case class SpatialDropoutTest(test_case.TestCase): diff --git a/keras_core/layers/reshaping/__init__.py b/keras/layers/reshaping/__init__.py similarity index 100% rename from keras_core/layers/reshaping/__init__.py rename to keras/layers/reshaping/__init__.py diff --git a/keras_core/layers/reshaping/cropping1d.py b/keras/layers/reshaping/cropping1d.py similarity index 89% rename from keras_core/layers/reshaping/cropping1d.py rename to keras/layers/reshaping/cropping1d.py index d0c05903b..2b775e6fb 100644 --- a/keras_core/layers/reshaping/cropping1d.py +++ b/keras/layers/reshaping/cropping1d.py @@ -1,10 +1,10 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation 
+from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.Cropping1D") +@keras_export("keras.layers.Cropping1D") class Cropping1D(Layer): """Cropping layer for 1D input (e.g. temporal sequence). @@ -21,7 +21,7 @@ class Cropping1D(Layer): [[ 6 7] [ 8 9] [10 11]]] - >>> y = keras_core.layers.Cropping1D(cropping=1)(x) + >>> y = keras.layers.Cropping1D(cropping=1)(x) >>> y [[[2 3]] [[8 9]]] diff --git a/keras_core/layers/reshaping/cropping1d_test.py b/keras/layers/reshaping/cropping1d_test.py similarity index 96% rename from keras_core/layers/reshaping/cropping1d_test.py rename to keras/layers/reshaping/cropping1d_test.py index 67862843d..c9d67c235 100644 --- a/keras_core/layers/reshaping/cropping1d_test.py +++ b/keras/layers/reshaping/cropping1d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class Cropping1DTest(testing.TestCase): diff --git a/keras_core/layers/reshaping/cropping2d.py b/keras/layers/reshaping/cropping2d.py similarity index 96% rename from keras_core/layers/reshaping/cropping2d.py rename to keras/layers/reshaping/cropping2d.py index 5c329c184..05bb82bc4 100644 --- a/keras_core/layers/reshaping/cropping2d.py +++ b/keras/layers/reshaping/cropping2d.py @@ -1,11 +1,11 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation 
-@keras_core_export("keras_core.layers.Cropping2D") +@keras_export("keras.layers.Cropping2D") class Cropping2D(Layer): """Cropping layer for 2D input (e.g. picture). @@ -15,7 +15,7 @@ class Cropping2D(Layer): >>> input_shape = (2, 28, 28, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) - >>> y = keras_core.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) + >>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) >>> y.shape (2, 24, 20, 3) diff --git a/keras_core/layers/reshaping/cropping2d_test.py b/keras/layers/reshaping/cropping2d_test.py similarity index 97% rename from keras_core/layers/reshaping/cropping2d_test.py rename to keras/layers/reshaping/cropping2d_test.py index d413a64fd..6033223a3 100644 --- a/keras_core/layers/reshaping/cropping2d_test.py +++ b/keras/layers/reshaping/cropping2d_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class Cropping2DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/cropping3d.py b/keras/layers/reshaping/cropping3d.py similarity index 96% rename from keras_core/layers/reshaping/cropping3d.py rename to keras/layers/reshaping/cropping3d.py index 2c0710890..ee55e886c 100644 --- a/keras_core/layers/reshaping/cropping3d.py +++ b/keras/layers/reshaping/cropping3d.py @@ -1,11 +1,11 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.Cropping3D") 
+@keras_export("keras.layers.Cropping3D") class Cropping3D(Layer): """Cropping layer for 3D data (e.g. spatial or spatio-temporal). @@ -13,7 +13,7 @@ class Cropping3D(Layer): >>> input_shape = (2, 28, 28, 10, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) - >>> y = keras_core.layers.Cropping3D(cropping=(2, 4, 2))(x) + >>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x) >>> y.shape (2, 24, 20, 6, 3) diff --git a/keras_core/layers/reshaping/cropping3d_test.py b/keras/layers/reshaping/cropping3d_test.py similarity index 98% rename from keras_core/layers/reshaping/cropping3d_test.py rename to keras/layers/reshaping/cropping3d_test.py index e9f067c67..17ec204bd 100644 --- a/keras_core/layers/reshaping/cropping3d_test.py +++ b/keras/layers/reshaping/cropping3d_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class Cropping3DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/flatten.py b/keras/layers/reshaping/flatten.py similarity index 85% rename from keras_core/layers/reshaping/flatten.py rename to keras/layers/reshaping/flatten.py index 96074348a..da72ce961 100644 --- a/keras_core/layers/reshaping/flatten.py +++ b/keras/layers/reshaping/flatten.py @@ -1,14 +1,14 @@ import math -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.backend.common.keras_tensor import KerasTensor +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer 
-@keras_core_export("keras_core.layers.Flatten") +@keras_export("keras.layers.Flatten") class Flatten(Layer): """Flattens the input. Does not affect the batch size. @@ -27,8 +27,8 @@ class Flatten(Layer): Example: - >>> x = keras_core.Input(shape=(10, 64)) - >>> y = keras_core.layers.Flatten()(x) + >>> x = keras.Input(shape=(10, 64)) + >>> y = keras.layers.Flatten()(x) >>> y.shape (None, 640) """ diff --git a/keras_core/layers/reshaping/flatten_test.py b/keras/layers/reshaping/flatten_test.py similarity index 96% rename from keras_core/layers/reshaping/flatten_test.py rename to keras/layers/reshaping/flatten_test.py index 38e66fdd7..c0014ccb0 100644 --- a/keras_core/layers/reshaping/flatten_test.py +++ b/keras/layers/reshaping/flatten_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import backend +from keras import layers +from keras import ops +from keras import testing class FlattenTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/permute.py b/keras/layers/reshaping/permute.py similarity index 82% rename from keras_core/layers/reshaping/permute.py rename to keras/layers/reshaping/permute.py index 9defed30d..30d52e4a7 100644 --- a/keras_core/layers/reshaping/permute.py +++ b/keras/layers/reshaping/permute.py @@ -1,11 +1,11 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import ops +from keras.api_export import keras_export +from keras.backend.common.keras_tensor import KerasTensor +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.Permute") 
+@keras_export("keras.layers.Permute") class Permute(Layer): """Permutes the dimensions of the input according to a given pattern. @@ -26,8 +26,8 @@ class Permute(Layer): Example: - >>> x = keras_core.Input(shape=(10, 64)) - >>> y = keras_core.layers.Permute((2, 1))(x) + >>> x = keras.Input(shape=(10, 64)) + >>> y = keras.layers.Permute((2, 1))(x) >>> y.shape (None, 64, 10) """ diff --git a/keras_core/layers/reshaping/permute_test.py b/keras/layers/reshaping/permute_test.py similarity index 94% rename from keras_core/layers/reshaping/permute_test.py rename to keras/layers/reshaping/permute_test.py index ad2a1ebf3..8eac612b7 100644 --- a/keras_core/layers/reshaping/permute_test.py +++ b/keras/layers/reshaping/permute_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import backend +from keras import layers +from keras import ops +from keras import testing class PermuteTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/repeat_vector.py b/keras/layers/reshaping/repeat_vector.py similarity index 76% rename from keras_core/layers/reshaping/repeat_vector.py rename to keras/layers/reshaping/repeat_vector.py index 15f679b5e..0e9507e10 100644 --- a/keras_core/layers/reshaping/repeat_vector.py +++ b/keras/layers/reshaping/repeat_vector.py @@ -1,17 +1,17 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.RepeatVector") +@keras_export("keras.layers.RepeatVector") class RepeatVector(Layer): """Repeats the input n times. 
Example: - >>> x = keras_core.Input(shape=(32,)) - >>> y = keras_core.layers.RepeatVector(3)(x) + >>> x = keras.Input(shape=(32,)) + >>> y = keras.layers.RepeatVector(3)(x) >>> y.shape (None, 3, 32) diff --git a/keras_core/layers/reshaping/repeat_vector_test.py b/keras/layers/reshaping/repeat_vector_test.py similarity index 91% rename from keras_core/layers/reshaping/repeat_vector_test.py rename to keras/layers/reshaping/repeat_vector_test.py index 992080566..7bc0cd591 100644 --- a/keras_core/layers/reshaping/repeat_vector_test.py +++ b/keras/layers/reshaping/repeat_vector_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class FlattenTest(testing.TestCase): diff --git a/keras_core/layers/reshaping/reshape.py b/keras/layers/reshaping/reshape.py similarity index 79% rename from keras_core/layers/reshaping/reshape.py rename to keras/layers/reshaping/reshape.py index c19616fde..18001a964 100644 --- a/keras_core/layers/reshaping/reshape.py +++ b/keras/layers/reshaping/reshape.py @@ -1,11 +1,11 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.layers.layer import Layer -from keras_core.ops import operation_utils +from keras import ops +from keras.api_export import keras_export +from keras.backend.common.keras_tensor import KerasTensor +from keras.layers.layer import Layer +from keras.ops import operation_utils -@keras_core_export("keras_core.layers.Reshape") +@keras_export("keras.layers.Reshape") class Reshape(Layer): """Layer that reshapes inputs into the given shape. 
@@ -24,13 +24,13 @@ class Reshape(Layer): Example: - >>> x = keras_core.Input(shape=(12,)) - >>> y = keras_core.layers.Reshape((3, 4))(x) + >>> x = keras.Input(shape=(12,)) + >>> y = keras.layers.Reshape((3, 4))(x) >>> y.shape (None, 3, 4) >>> # also supports shape inference using `-1` as dimension - >>> y = keras_core.layers.Reshape((-1, 2, 2))(x) + >>> y = keras.layers.Reshape((-1, 2, 2))(x) >>> y.shape (None, 3, 2, 2) """ diff --git a/keras_core/layers/reshaping/reshape_test.py b/keras/layers/reshaping/reshape_test.py similarity index 97% rename from keras_core/layers/reshaping/reshape_test.py rename to keras/layers/reshaping/reshape_test.py index 0f6cb6483..25afa22b6 100644 --- a/keras_core/layers/reshaping/reshape_test.py +++ b/keras/layers/reshaping/reshape_test.py @@ -1,9 +1,9 @@ import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class ReshapeTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/up_sampling1d.py b/keras/layers/reshaping/up_sampling1d.py similarity index 82% rename from keras_core/layers/reshaping/up_sampling1d.py rename to keras/layers/reshaping/up_sampling1d.py index 3716d6eeb..2f264b1be 100644 --- a/keras_core/layers/reshaping/up_sampling1d.py +++ b/keras/layers/reshaping/up_sampling1d.py @@ -1,10 +1,10 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.UpSampling1D") +@keras_export("keras.layers.UpSampling1D") class UpSampling1D(Layer): """Upsampling layer for 1D inputs. 
@@ -19,7 +19,7 @@ class UpSampling1D(Layer): [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] - >>> y = keras_core.layers.UpSampling1D(size=2)(x) + >>> y = keras.layers.UpSampling1D(size=2)(x) >>> y [[[ 0. 1. 2.] [ 0. 1. 2.] diff --git a/keras_core/layers/reshaping/up_sampling1d_test.py b/keras/layers/reshaping/up_sampling1d_test.py similarity index 94% rename from keras_core/layers/reshaping/up_sampling1d_test.py rename to keras/layers/reshaping/up_sampling1d_test.py index 45417e585..aa5f8c0de 100644 --- a/keras_core/layers/reshaping/up_sampling1d_test.py +++ b/keras/layers/reshaping/up_sampling1d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import testing -from keras_core.backend.common.keras_tensor import KerasTensor +from keras import layers +from keras import testing +from keras.backend.common.keras_tensor import KerasTensor class UpSamplingTest(testing.TestCase): diff --git a/keras_core/layers/reshaping/up_sampling2d.py b/keras/layers/reshaping/up_sampling2d.py similarity index 92% rename from keras_core/layers/reshaping/up_sampling2d.py rename to keras/layers/reshaping/up_sampling2d.py index 4c7780036..6009c67e5 100644 --- a/keras_core/layers/reshaping/up_sampling2d.py +++ b/keras/layers/reshaping/up_sampling2d.py @@ -1,14 +1,14 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.UpSampling2D") +@keras_export("keras.layers.UpSampling2D") class UpSampling2D(Layer): """Upsampling layer for 2D inputs. 
@@ -25,7 +25,7 @@ class UpSampling2D(Layer): [[ 3 4 5]]] [[[ 6 7 8]] [[ 9 10 11]]]] - >>> y = keras_core.layers.UpSampling2D(size=(1, 2))(x) + >>> y = keras.layers.UpSampling2D(size=(1, 2))(x) >>> print(y) [[[[ 0 1 2] [ 0 1 2]] @@ -154,7 +154,7 @@ class UpSampling2D(Layer): if data_format == "channels_first": x = ops.transpose(x, [0, 2, 3, 1]) - # https://github.com/keras-team/keras-core/issues/294 + # https://github.com/keras-team/keras/issues/294 # Use `ops.repeat` for `nearest` interpolation if interpolation == "nearest": x = ops.repeat(x, height_factor, axis=1) diff --git a/keras_core/layers/reshaping/up_sampling2d_test.py b/keras/layers/reshaping/up_sampling2d_test.py similarity index 98% rename from keras_core/layers/reshaping/up_sampling2d_test.py rename to keras/layers/reshaping/up_sampling2d_test.py index b87e8174c..edb3b8a5b 100644 --- a/keras_core/layers/reshaping/up_sampling2d_test.py +++ b/keras/layers/reshaping/up_sampling2d_test.py @@ -3,9 +3,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class UpSampling2dTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/up_sampling3d.py b/keras/layers/reshaping/up_sampling3d.py similarity index 92% rename from keras_core/layers/reshaping/up_sampling3d.py rename to keras/layers/reshaping/up_sampling3d.py index 2e96a9498..6b74a8725 100644 --- a/keras_core/layers/reshaping/up_sampling3d.py +++ b/keras/layers/reshaping/up_sampling3d.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras import ops +from keras.api_export 
import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.UpSampling3D") +@keras_export("keras.layers.UpSampling3D") class UpSampling3D(Layer): """Upsampling layer for 3D inputs. @@ -17,7 +17,7 @@ class UpSampling3D(Layer): >>> input_shape = (2, 1, 2, 1, 3) >>> x = np.ones(input_shape) - >>> y = keras_core.layers.UpSampling3D(size=(2, 2, 2))(x) + >>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x) >>> y.shape (2, 2, 4, 2, 3) diff --git a/keras_core/layers/reshaping/up_sampling3d_test.py b/keras/layers/reshaping/up_sampling3d_test.py similarity index 98% rename from keras_core/layers/reshaping/up_sampling3d_test.py rename to keras/layers/reshaping/up_sampling3d_test.py index c747dff9f..03f32fa13 100644 --- a/keras_core/layers/reshaping/up_sampling3d_test.py +++ b/keras/layers/reshaping/up_sampling3d_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import backend -from keras_core import layers -from keras_core import testing +from keras import backend +from keras import layers +from keras import testing class UpSampling3dTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/zero_padding1d.py b/keras/layers/reshaping/zero_padding1d.py similarity index 84% rename from keras_core/layers/reshaping/zero_padding1d.py rename to keras/layers/reshaping/zero_padding1d.py index 102681387..4ce263c8a 100644 --- a/keras_core/layers/reshaping/zero_padding1d.py +++ b/keras/layers/reshaping/zero_padding1d.py @@ -1,11 +1,11 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import 
InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.ZeroPadding1D") +@keras_export("keras.layers.ZeroPadding1D") class ZeroPadding1D(Layer): """Zero-padding layer for 1D input (e.g. temporal sequence). @@ -18,7 +18,7 @@ class ZeroPadding1D(Layer): [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] - >>> y = keras_core.layers.ZeroPadding1D(padding=2)(x) + >>> y = keras.layers.ZeroPadding1D(padding=2)(x) >>> y [[[ 0 0 0] [ 0 0 0] diff --git a/keras_core/layers/reshaping/zero_padding1d_test.py b/keras/layers/reshaping/zero_padding1d_test.py similarity index 95% rename from keras_core/layers/reshaping/zero_padding1d_test.py rename to keras/layers/reshaping/zero_padding1d_test.py index f448e5440..0fc888959 100644 --- a/keras_core/layers/reshaping/zero_padding1d_test.py +++ b/keras/layers/reshaping/zero_padding1d_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/zero_padding2d.py b/keras/layers/reshaping/zero_padding2d.py similarity index 92% rename from keras_core/layers/reshaping/zero_padding2d.py rename to keras/layers/reshaping/zero_padding2d.py index f98f2f27b..752c07ee2 100644 --- a/keras_core/layers/reshaping/zero_padding2d.py +++ b/keras/layers/reshaping/zero_padding2d.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import 
argument_validation -@keras_core_export("keras_core.layers.ZeroPadding2D") +@keras_export("keras.layers.ZeroPadding2D") class ZeroPadding2D(Layer): """Zero-padding layer for 2D input (e.g. picture). @@ -20,7 +20,7 @@ class ZeroPadding2D(Layer): >>> x [[[[0 1] [2 3]]]] - >>> y = keras_core.layers.ZeroPadding2D(padding=1)(x) + >>> y = keras.layers.ZeroPadding2D(padding=1)(x) >>> y [[[[0 0] [0 0] diff --git a/keras_core/layers/reshaping/zero_padding2d_test.py b/keras/layers/reshaping/zero_padding2d_test.py similarity index 98% rename from keras_core/layers/reshaping/zero_padding2d_test.py rename to keras/layers/reshaping/zero_padding2d_test.py index db5c139bb..43ba1bb94 100644 --- a/keras_core/layers/reshaping/zero_padding2d_test.py +++ b/keras/layers/reshaping/zero_padding2d_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/reshaping/zero_padding3d.py b/keras/layers/reshaping/zero_padding3d.py similarity index 92% rename from keras_core/layers/reshaping/zero_padding3d.py rename to keras/layers/reshaping/zero_padding3d.py index bf50ae930..9f2a253bc 100644 --- a/keras_core/layers/reshaping/zero_padding3d.py +++ b/keras/layers/reshaping/zero_padding3d.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.utils import argument_validation -@keras_core_export("keras_core.layers.ZeroPadding3D") 
+@keras_export("keras.layers.ZeroPadding3D") class ZeroPadding3D(Layer): """Zero-padding layer for 3D data (spatial or spatio-temporal). @@ -14,7 +14,7 @@ class ZeroPadding3D(Layer): >>> input_shape = (1, 1, 2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) - >>> y = keras_core.layers.ZeroPadding3D(padding=2)(x) + >>> y = keras.layers.ZeroPadding3D(padding=2)(x) >>> y.shape (1, 5, 6, 6, 3) diff --git a/keras_core/layers/reshaping/zero_padding3d_test.py b/keras/layers/reshaping/zero_padding3d_test.py similarity index 98% rename from keras_core/layers/reshaping/zero_padding3d_test.py rename to keras/layers/reshaping/zero_padding3d_test.py index 4caf5a7c1..d4b161066 100644 --- a/keras_core/layers/reshaping/zero_padding3d_test.py +++ b/keras/layers/reshaping/zero_padding3d_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras_core import layers -from keras_core import testing +from keras import layers +from keras import testing class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/rnn/__init__.py b/keras/layers/rnn/__init__.py similarity index 100% rename from keras_core/layers/rnn/__init__.py rename to keras/layers/rnn/__init__.py diff --git a/keras_core/layers/rnn/bidirectional.py b/keras/layers/rnn/bidirectional.py similarity index 93% rename from keras_core/layers/rnn/bidirectional.py rename to keras/layers/rnn/bidirectional.py index 14db6ab1a..910473b92 100644 --- a/keras_core/layers/rnn/bidirectional.py +++ b/keras/layers/rnn/bidirectional.py @@ -1,21 +1,21 @@ import copy -from keras_core import ops -from keras_core import utils -from keras_core.api_export import keras_core_export -from keras_core.layers.core.wrapper import Wrapper -from keras_core.layers.layer import Layer -from keras_core.saving import serialization_lib +from keras import ops +from keras import utils +from keras.api_export import keras_export +from keras.layers.core.wrapper import Wrapper 
+from keras.layers.layer import Layer +from keras.saving import serialization_lib -@keras_core_export("keras_core.layers.Bidirectional") +@keras_export("keras.layers.Bidirectional") class Bidirectional(Wrapper): """Bidirectional wrapper for RNNs. Args: - layer: `keras_core.layers.RNN` instance, such as - `keras_core.layers.LSTM` or `keras_core.layers.GRU`. - It could also be a `keras_core.layers.Layer` instance + layer: `keras.layers.RNN` instance, such as + `keras.layers.LSTM` or `keras.layers.GRU`. + It could also be a `keras.layers.Layer` instance that meets the following criteria: 1. Be a sequence-processing layer (accepts 3D+ inputs). 2. Have a `go_backwards`, `return_sequences` and `return_state` @@ -23,8 +23,8 @@ class Bidirectional(Wrapper): 3. Have an `input_spec` attribute. 4. Implement serialization via `get_config()` and `from_config()`. Note that the recommended way to create new RNN layers is to write a - custom RNN cell and use it with `keras_core.layers.RNN`, instead of - subclassing `keras_core.layers.Layer` directly. + custom RNN cell and use it with `keras.layers.RNN`, instead of + subclassing `keras.layers.Layer` directly. When `return_sequences` is `True`, the output of the masked timestep will be zero regardless of the layer's original `zero_output_for_mask` value. @@ -32,8 +32,8 @@ class Bidirectional(Wrapper): will be combined. One of `{"sum", "mul", "concat", "ave", None}`. If `None`, the outputs will not be combined, they will be returned as a list. Defaults to `"concat"`. - backward_layer: Optional `keras_core.layers.RNN`, - or `keras_core.layers.Layer` instance to be used to handle + backward_layer: Optional `keras.layers.RNN`, + or `keras.layers.Layer` instance to be used to handle backwards input processing. 
If `backward_layer` is not provided, the layer instance passed as the `layer` argument will be used to generate the backward layer @@ -92,11 +92,11 @@ class Bidirectional(Wrapper): if not isinstance(layer, Layer): raise ValueError( "Please initialize `Bidirectional` layer with a " - f"`keras_core.layers.Layer` instance. Received: {layer}" + f"`keras.layers.Layer` instance. Received: {layer}" ) if backward_layer is not None and not isinstance(backward_layer, Layer): raise ValueError( - "`backward_layer` need to be a `keras_core.layers.Layer` " + "`backward_layer` need to be a `keras.layers.Layer` " f"instance. Received: {backward_layer}" ) if merge_mode not in ["sum", "mul", "ave", "concat", None]: diff --git a/keras_core/layers/rnn/bidirectional_test.py b/keras/layers/rnn/bidirectional_test.py similarity index 98% rename from keras_core/layers/rnn/bidirectional_test.py rename to keras/layers/rnn/bidirectional_test.py index 0f326a211..007480653 100644 --- a/keras_core/layers/rnn/bidirectional_test.py +++ b/keras/layers/rnn/bidirectional_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class SimpleRNNTest(testing.TestCase): diff --git a/keras_core/layers/rnn/conv_lstm.py b/keras/layers/rnn/conv_lstm.py similarity index 98% rename from keras_core/layers/rnn/conv_lstm.py rename to keras/layers/rnn/conv_lstm.py index d60cbba69..2c20bdd05 100644 --- a/keras_core/layers/rnn/conv_lstm.py +++ b/keras/layers/rnn/conv_lstm.py @@ -1,17 +1,17 @@ import tree -from keras_core import activations -from keras_core import backend -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from 
keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell -from keras_core.layers.rnn.rnn import RNN -from keras_core.ops import operation_utils -from keras_core.utils import argument_validation +from keras import activations +from keras import backend +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.layers.rnn.rnn import RNN +from keras.ops import operation_utils +from keras.utils import argument_validation class ConvLSTMCell(Layer, DropoutRNNCell): diff --git a/keras_core/layers/rnn/conv_lstm1d.py b/keras/layers/rnn/conv_lstm1d.py similarity index 98% rename from keras_core/layers/rnn/conv_lstm1d.py rename to keras/layers/rnn/conv_lstm1d.py index c23ac3756..a96cdbe77 100644 --- a/keras_core/layers/rnn/conv_lstm1d.py +++ b/keras/layers/rnn/conv_lstm1d.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.rnn.conv_lstm import ConvLSTM +from keras.api_export import keras_export +from keras.layers.rnn.conv_lstm import ConvLSTM -@keras_core_export("keras_core.layers.ConvLSTM1D") +@keras_export("keras.layers.ConvLSTM1D") class ConvLSTM1D(ConvLSTM): """1D Convolutional LSTM. 
diff --git a/keras_core/layers/rnn/conv_lstm1d_test.py b/keras/layers/rnn/conv_lstm1d_test.py similarity index 95% rename from keras_core/layers/rnn/conv_lstm1d_test.py rename to keras/layers/rnn/conv_lstm1d_test.py index 948b637c9..c96166178 100644 --- a/keras_core/layers/rnn/conv_lstm1d_test.py +++ b/keras/layers/rnn/conv_lstm1d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class ConvLSTM1DTest(testing.TestCase): diff --git a/keras_core/layers/rnn/conv_lstm2d.py b/keras/layers/rnn/conv_lstm2d.py similarity index 98% rename from keras_core/layers/rnn/conv_lstm2d.py rename to keras/layers/rnn/conv_lstm2d.py index ce31928fb..1819e5a9b 100644 --- a/keras_core/layers/rnn/conv_lstm2d.py +++ b/keras/layers/rnn/conv_lstm2d.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.rnn.conv_lstm import ConvLSTM +from keras.api_export import keras_export +from keras.layers.rnn.conv_lstm import ConvLSTM -@keras_core_export("keras_core.layers.ConvLSTM2D") +@keras_export("keras.layers.ConvLSTM2D") class ConvLSTM2D(ConvLSTM): """2D Convolutional LSTM. 
diff --git a/keras_core/layers/rnn/conv_lstm2d_test.py b/keras/layers/rnn/conv_lstm2d_test.py similarity index 96% rename from keras_core/layers/rnn/conv_lstm2d_test.py rename to keras/layers/rnn/conv_lstm2d_test.py index 88e825071..263b48808 100644 --- a/keras_core/layers/rnn/conv_lstm2d_test.py +++ b/keras/layers/rnn/conv_lstm2d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class ConvLSTM2DTest(testing.TestCase): diff --git a/keras_core/layers/rnn/conv_lstm3d.py b/keras/layers/rnn/conv_lstm3d.py similarity index 98% rename from keras_core/layers/rnn/conv_lstm3d.py rename to keras/layers/rnn/conv_lstm3d.py index 2a9690e1c..e22bb2cc6 100644 --- a/keras_core/layers/rnn/conv_lstm3d.py +++ b/keras/layers/rnn/conv_lstm3d.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.rnn.conv_lstm import ConvLSTM +from keras.api_export import keras_export +from keras.layers.rnn.conv_lstm import ConvLSTM -@keras_core_export("keras_core.layers.ConvLSTM3D") +@keras_export("keras.layers.ConvLSTM3D") class ConvLSTM3D(ConvLSTM): """3D Convolutional LSTM. 
diff --git a/keras_core/layers/rnn/conv_lstm3d_test.py b/keras/layers/rnn/conv_lstm3d_test.py similarity index 97% rename from keras_core/layers/rnn/conv_lstm3d_test.py rename to keras/layers/rnn/conv_lstm3d_test.py index 7d5a33420..07c05b8e2 100644 --- a/keras_core/layers/rnn/conv_lstm3d_test.py +++ b/keras/layers/rnn/conv_lstm3d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class ConvLSTM1DTest(testing.TestCase): diff --git a/keras_core/layers/rnn/conv_lstm_test.py b/keras/layers/rnn/conv_lstm_test.py similarity index 88% rename from keras_core/layers/rnn/conv_lstm_test.py rename to keras/layers/rnn/conv_lstm_test.py index 13c403811..fed45cde4 100644 --- a/keras_core/layers/rnn/conv_lstm_test.py +++ b/keras/layers/rnn/conv_lstm_test.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core import backend -from keras_core import initializers -from keras_core import testing -from keras_core.layers.rnn.conv_lstm import ConvLSTM -from keras_core.layers.rnn.conv_lstm import ConvLSTMCell +from keras import backend +from keras import initializers +from keras import testing +from keras.layers.rnn.conv_lstm import ConvLSTM +from keras.layers.rnn.conv_lstm import ConvLSTMCell class ConvLSTMCellTest(testing.TestCase): diff --git a/keras_core/layers/rnn/dropout_rnn_cell.py b/keras/layers/rnn/dropout_rnn_cell.py similarity index 97% rename from keras_core/layers/rnn/dropout_rnn_cell.py rename to keras/layers/rnn/dropout_rnn_cell.py index 709e351e5..5b0b83ac8 100644 --- a/keras_core/layers/rnn/dropout_rnn_cell.py +++ b/keras/layers/rnn/dropout_rnn_cell.py @@ -1,5 +1,5 @@ -from keras_core import backend -from keras_core import ops +from keras import backend +from keras import ops class DropoutRNNCell: diff --git a/keras_core/layers/rnn/dropout_rnn_cell_test.py 
b/keras/layers/rnn/dropout_rnn_cell_test.py similarity index 91% rename from keras_core/layers/rnn/dropout_rnn_cell_test.py rename to keras/layers/rnn/dropout_rnn_cell_test.py index 6b000e894..927fd627c 100644 --- a/keras_core/layers/rnn/dropout_rnn_cell_test.py +++ b/keras/layers/rnn/dropout_rnn_cell_test.py @@ -1,10 +1,10 @@ import pytest -from keras_core import backend -from keras_core import layers -from keras_core import ops -from keras_core import testing -from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras import backend +from keras import layers +from keras import ops +from keras import testing +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell class RNNCellWithDropout(layers.Layer, DropoutRNNCell): diff --git a/keras_core/layers/rnn/gru.py b/keras/layers/rnn/gru.py similarity index 96% rename from keras_core/layers/rnn/gru.py rename to keras/layers/rnn/gru.py index cbee61e62..db28930b2 100644 --- a/keras_core/layers/rnn/gru.py +++ b/keras/layers/rnn/gru.py @@ -1,24 +1,24 @@ import tree -from keras_core import activations -from keras_core import backend -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell -from keras_core.layers.rnn.rnn import RNN +from keras import activations +from keras import backend +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.layers.rnn.rnn import RNN -@keras_core_export("keras_core.layers.GRUCell") 
+@keras_export("keras.layers.GRUCell") class GRUCell(Layer, DropoutRNNCell): """Cell class for the GRU layer. This class processes one step within the whole time sequence input, whereas - `keras_core.layer.GRU` processes the whole sequence. + `keras.layer.GRU` processes the whole sequence. Args: units: Positive integer, dimensionality of the output space. @@ -69,12 +69,12 @@ class GRUCell(Layer, DropoutRNNCell): Example: >>> inputs = np.random.random((32, 10, 8)) - >>> rnn = keras_core.layers.RNN(keras_core.layers.GRUCell(4)) + >>> rnn = keras.layers.RNN(keras.layers.GRUCell(4)) >>> output = rnn(inputs) >>> output.shape (32, 4) - >>> rnn = keras_core.layers.RNN( - ... keras_core.layers.GRUCell(4), + >>> rnn = keras.layers.RNN( + ... keras.layers.GRUCell(4), ... return_sequences=True, ... return_state=True) >>> whole_sequence_output, final_state = rnn(inputs) @@ -328,7 +328,7 @@ class GRUCell(Layer, DropoutRNNCell): ] -@keras_core_export("keras_core.layers.GRU") +@keras_export("keras.layers.GRU") class GRU(RNN): """Gated Recurrent Unit - Cho et al. 2014. 
@@ -363,11 +363,11 @@ class GRU(RNN): For example: >>> inputs = np.random.random((32, 10, 8)) - >>> gru = keras_core.layers.GRU(4) + >>> gru = keras.layers.GRU(4) >>> output = gru(inputs) >>> output.shape (32, 4) - >>> gru = keras_core.layers.GRU(4, return_sequences=True, return_state=True) + >>> gru = keras.layers.GRU(4, return_sequences=True, return_state=True) >>> whole_sequence_output, final_state = gru(inputs) >>> whole_sequence_output.shape (32, 10, 4) diff --git a/keras_core/layers/rnn/gru_test.py b/keras/layers/rnn/gru_test.py similarity index 98% rename from keras_core/layers/rnn/gru_test.py rename to keras/layers/rnn/gru_test.py index 16fab6c35..14f5248d7 100644 --- a/keras_core/layers/rnn/gru_test.py +++ b/keras/layers/rnn/gru_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class GRUTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/rnn/lstm.py b/keras/layers/rnn/lstm.py similarity index 96% rename from keras_core/layers/rnn/lstm.py rename to keras/layers/rnn/lstm.py index 9ce9512cb..73ed18aac 100644 --- a/keras_core/layers/rnn/lstm.py +++ b/keras/layers/rnn/lstm.py @@ -1,24 +1,24 @@ import tree -from keras_core import activations -from keras_core import backend -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell -from keras_core.layers.rnn.rnn import RNN +from keras import activations +from keras import backend +from keras import constraints +from keras import initializers +from keras import ops +from 
keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.layers.rnn.rnn import RNN -@keras_core_export("keras_core.layers.LSTMCell") +@keras_export("keras.layers.LSTMCell") class LSTMCell(Layer, DropoutRNNCell): """Cell class for the LSTM layer. This class processes one step within the whole time sequence input, whereas - `keras_core.layer.LSTM` processes the whole sequence. + `keras.layer.LSTM` processes the whole sequence. Args: units: Positive integer, dimensionality of the output space. @@ -71,12 +71,12 @@ class LSTMCell(Layer, DropoutRNNCell): Example: >>> inputs = np.random.random((32, 10, 8)) - >>> rnn = keras_core.layers.RNN(keras_core.layers.LSTMCell(4)) + >>> rnn = keras.layers.RNN(keras.layers.LSTMCell(4)) >>> output = rnn(inputs) >>> output.shape (32, 4) - >>> rnn = keras_core.layers.RNN( - ... keras_core.layers.LSTMCell(4), + >>> rnn = keras.layers.RNN( + ... keras.layers.LSTMCell(4), ... return_sequences=True, ... return_state=True) >>> whole_sequence_output, final_state = rnn(inputs) @@ -316,7 +316,7 @@ class LSTMCell(Layer, DropoutRNNCell): ] -@keras_core_export("keras_core.layers.LSTM") +@keras_export("keras.layers.LSTM") class LSTM(RNN): """Long Short-Term Memory layer - Hochreiter 1997. @@ -339,11 +339,11 @@ class LSTM(RNN): For example: >>> inputs = np.random.random((32, 10, 8)) - >>> lstm = keras_core.layers.LSTM(4) + >>> lstm = keras.layers.LSTM(4) >>> output = lstm(inputs) >>> output.shape (32, 4) - >>> lstm = keras_core.layers.LSTM( + >>> lstm = keras.layers.LSTM( ... 
4, return_sequences=True, return_state=True) >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs) >>> whole_seq_output.shape diff --git a/keras_core/layers/rnn/lstm_test.py b/keras/layers/rnn/lstm_test.py similarity index 99% rename from keras_core/layers/rnn/lstm_test.py rename to keras/layers/rnn/lstm_test.py index 95d0af326..8811cc760 100644 --- a/keras_core/layers/rnn/lstm_test.py +++ b/keras/layers/rnn/lstm_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class LSTMTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/layers/rnn/rnn.py b/keras/layers/rnn/rnn.py similarity index 96% rename from keras_core/layers/rnn/rnn.py rename to keras/layers/rnn/rnn.py index e41e299f1..b8c0d93c4 100644 --- a/keras_core/layers/rnn/rnn.py +++ b/keras/layers/rnn/rnn.py @@ -1,16 +1,16 @@ import tree -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell -from keras_core.layers.rnn.stacked_rnn_cells import StackedRNNCells -from keras_core.saving import serialization_lib -from keras_core.utils import tracking +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.saving import serialization_lib +from keras.utils import tracking -@keras_core_export("keras_core.layers.RNN") +@keras_export("keras.layers.RNN") class RNN(Layer): """Base class for recurrent layers. 
@@ -93,7 +93,7 @@ class RNN(Layer): This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, - use a `keras_core.layers.Embedding` layer with the `mask_zero` parameter + use a `keras.layers.Embedding` layer with the `mask_zero` parameter set to `True`. Note on using statefulness in RNNs: @@ -134,11 +134,11 @@ class RNN(Layer): Examples: ```python - from keras_core.layers import RNN - from keras_core import ops + from keras.layers import RNN + from keras import ops # First, let's define a RNN Cell, as a layer subclass. - class MinimalRNNCell(keras_core.layers.Layer): + class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): super().__init__(**kwargs) @@ -164,14 +164,14 @@ class RNN(Layer): # Let's use this cell in a RNN layer: cell = MinimalRNNCell(32) - x = keras_core.Input((None, 5)) + x = keras.Input((None, 5)) layer = RNN(cell) y = layer(x) # Here's how to use the cell to build a stacked RNN: cells = [MinimalRNNCell(32), MinimalRNNCell(64)] - x = keras_core.Input((None, 5)) + x = keras.Input((None, 5)) layer = RNN(cells) y = layer(x) ``` diff --git a/keras_core/layers/rnn/rnn_test.py b/keras/layers/rnn/rnn_test.py similarity index 99% rename from keras_core/layers/rnn/rnn_test.py rename to keras/layers/rnn/rnn_test.py index 8ddcc6346..dcfa075d7 100644 --- a/keras_core/layers/rnn/rnn_test.py +++ b/keras/layers/rnn/rnn_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import layers +from keras import ops +from keras import testing class OneStateRNNCell(layers.Layer): diff --git a/keras_core/layers/rnn/simple_rnn.py b/keras/layers/rnn/simple_rnn.py similarity index 95% rename from keras_core/layers/rnn/simple_rnn.py rename to keras/layers/rnn/simple_rnn.py index d7654bb0e..0225c309b 100644 --- a/keras_core/layers/rnn/simple_rnn.py +++ b/keras/layers/rnn/simple_rnn.py @@ -1,22 
+1,22 @@ -from keras_core import activations -from keras_core import backend -from keras_core import constraints -from keras_core import initializers -from keras_core import ops -from keras_core import regularizers -from keras_core.api_export import keras_core_export -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell -from keras_core.layers.rnn.rnn import RNN +from keras import activations +from keras import backend +from keras import constraints +from keras import initializers +from keras import ops +from keras import regularizers +from keras.api_export import keras_export +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.layers.rnn.rnn import RNN -@keras_core_export("keras_core.layers.SimpleRNNCell") +@keras_export("keras.layers.SimpleRNNCell") class SimpleRNNCell(Layer, DropoutRNNCell): """Cell class for SimpleRNN. This class processes one step within the whole time sequence input, whereas - `keras_core.layer.SimpleRNN` processes the whole sequence. + `keras.layer.SimpleRNN` processes the whole sequence. Args: units: Positive integer, dimensionality of the output space. @@ -63,10 +63,10 @@ class SimpleRNNCell(Layer, DropoutRNNCell): ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) - rnn = keras_core.layers.RNN(keras_core.layers.SimpleRNNCell(4)) + rnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4)) output = rnn(inputs) # The output has shape `(32, 4)`. 
- rnn = keras_core.layers.RNN( - keras_core.layers.SimpleRNNCell(4), + rnn = keras.layers.RNN( + keras.layers.SimpleRNNCell(4), return_sequences=True, return_state=True ) @@ -209,7 +209,7 @@ class SimpleRNNCell(Layer, DropoutRNNCell): return {**base_config, **config} -@keras_core_export("keras_core.layers.SimpleRNN") +@keras_export("keras.layers.SimpleRNN") class SimpleRNN(RNN): """Fully-connected RNN where the output is to be fed back as the new input. @@ -283,9 +283,9 @@ class SimpleRNN(RNN): ```python inputs = np.random.random((32, 10, 8)) - simple_rnn = keras_core.layers.SimpleRNN(4) + simple_rnn = keras.layers.SimpleRNN(4) output = simple_rnn(inputs) # The output has shape `(32, 4)`. - simple_rnn = keras_core.layers.SimpleRNN( + simple_rnn = keras.layers.SimpleRNN( 4, return_sequences=True, return_state=True ) # whole_sequence_output has shape `(32, 10, 4)`. diff --git a/keras_core/layers/rnn/simple_rnn_test.py b/keras/layers/rnn/simple_rnn_test.py similarity index 98% rename from keras_core/layers/rnn/simple_rnn_test.py rename to keras/layers/rnn/simple_rnn_test.py index 6d99e0b03..19bf67d32 100644 --- a/keras_core/layers/rnn/simple_rnn_test.py +++ b/keras/layers/rnn/simple_rnn_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import testing +from keras import initializers +from keras import layers +from keras import testing class SimpleRNNTest(testing.TestCase): diff --git a/keras_core/layers/rnn/stacked_rnn_cells.py b/keras/layers/rnn/stacked_rnn_cells.py similarity index 91% rename from keras_core/layers/rnn/stacked_rnn_cells.py rename to keras/layers/rnn/stacked_rnn_cells.py index f4e257628..fb28a492b 100644 --- a/keras_core/layers/rnn/stacked_rnn_cells.py +++ b/keras/layers/rnn/stacked_rnn_cells.py @@ -1,12 +1,12 @@ import tree -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from 
keras_core.saving import serialization_lib +from keras import ops +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.saving import serialization_lib -@keras_core_export("keras_core.layers.StackedRNNCells") +@keras_export("keras.layers.StackedRNNCells") class StackedRNNCells(Layer): """Wrapper allowing a stack of RNN cells to behave as a single cell. @@ -24,9 +24,9 @@ class StackedRNNCells(Layer): new_shape = (batch_size, sentence_length, num_features) x = np.reshape(np.arange(30), new_shape) - rnn_cells = [keras_core.layers.LSTMCell(128) for _ in range(2)] - stacked_lstm = keras_core.layers.StackedRNNCells(rnn_cells) - lstm_layer = keras_core.layers.RNN(stacked_lstm) + rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)] + stacked_lstm = keras.layers.StackedRNNCells(rnn_cells) + lstm_layer = keras.layers.RNN(stacked_lstm) result = lstm_layer(x) ``` diff --git a/keras_core/layers/rnn/stacked_rnn_cells_test.py b/keras/layers/rnn/stacked_rnn_cells_test.py similarity index 98% rename from keras_core/layers/rnn/stacked_rnn_cells_test.py rename to keras/layers/rnn/stacked_rnn_cells_test.py index edb51fbef..b59a278e2 100644 --- a/keras_core/layers/rnn/stacked_rnn_cells_test.py +++ b/keras/layers/rnn/stacked_rnn_cells_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import layers -from keras_core import testing -from keras_core.layers.rnn.rnn_test import OneStateRNNCell -from keras_core.layers.rnn.rnn_test import TwoStatesRNNCell +from keras import layers +from keras import testing +from keras.layers.rnn.rnn_test import OneStateRNNCell +from keras.layers.rnn.rnn_test import TwoStatesRNNCell class StackedRNNTest(testing.TestCase): diff --git a/keras_core/layers/rnn/time_distributed.py b/keras/layers/rnn/time_distributed.py similarity index 92% rename from keras_core/layers/rnn/time_distributed.py rename to keras/layers/rnn/time_distributed.py index e30bdf95b..d1cc613eb 100644 --- 
a/keras_core/layers/rnn/time_distributed.py +++ b/keras/layers/rnn/time_distributed.py @@ -1,13 +1,13 @@ """Wrapper layer to apply every temporal slice of an input.""" -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.core.wrapper import Wrapper -from keras_core.layers.layer import Layer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.layers.core.wrapper import Wrapper +from keras.layers.layer import Layer -@keras_core_export("keras_core.layers.TimeDistributed") +@keras_export("keras.layers.TimeDistributed") class TimeDistributed(Wrapper): """This wrapper allows to apply a layer to every temporal slice of an input. @@ -31,7 +31,7 @@ class TimeDistributed(Wrapper): the timestamps, the same set of weights are used at each timestamp. Args: - layer: a `keras_core.layers.Layer` instance. + layer: a `keras.layers.Layer` instance. Call arguments: inputs: Input tensor of shape (batch, time, ...) or nested tensors, @@ -48,7 +48,7 @@ class TimeDistributed(Wrapper): if not isinstance(layer, Layer): raise ValueError( "Please initialize `TimeDistributed` layer with a " - f"`keras_core.layers.Layer` instance. Received: {layer}" + f"`keras.layers.Layer` instance. 
Received: {layer}" ) super().__init__(layer, **kwargs) self.supports_masking = True diff --git a/keras_core/layers/rnn/time_distributed_test.py b/keras/layers/rnn/time_distributed_test.py similarity index 95% rename from keras_core/layers/rnn/time_distributed_test.py rename to keras/layers/rnn/time_distributed_test.py index 312afd88f..f91d3fea1 100644 --- a/keras_core/layers/rnn/time_distributed_test.py +++ b/keras/layers/rnn/time_distributed_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras_core import initializers -from keras_core import layers -from keras_core import ops -from keras_core import testing +from keras import initializers +from keras import layers +from keras import ops +from keras import testing class TimeDistributedTest(testing.TestCase): diff --git a/keras_core/legacy/__init__.py b/keras/legacy/__init__.py similarity index 100% rename from keras_core/legacy/__init__.py rename to keras/legacy/__init__.py diff --git a/keras_core/legacy/backend.py b/keras/legacy/backend.py similarity index 89% rename from keras_core/legacy/backend.py rename to keras/legacy/backend.py index 29b5562f3..5cfd2208a 100644 --- a/keras_core/legacy/backend.py +++ b/keras/legacy/backend.py @@ -4,47 +4,47 @@ import itertools import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.utils.module_utils import tensorflow as tf py_any = any py_all = all -@keras_core_export("keras_core._legacy.backend.abs") +@keras_export("keras._legacy.backend.abs") def abs(x): """DEPRECATED.""" return tf.abs(x) -@keras_core_export("keras_core._legacy.backend.all") +@keras_export("keras._legacy.backend.all") def all(x, axis=None, keepdims=False): """DEPRECATED.""" x = tf.cast(x, tf.bool) return tf.reduce_all(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.any") 
+@keras_export("keras._legacy.backend.any") def any(x, axis=None, keepdims=False): """DEPRECATED.""" x = tf.cast(x, tf.bool) return tf.reduce_any(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.argmax") +@keras_export("keras._legacy.backend.argmax") def argmax(x, axis=-1): """DEPRECATED.""" return tf.argmax(x, axis) -@keras_core_export("keras_core._legacy.backend.argmin") +@keras_export("keras._legacy.backend.argmin") def argmin(x, axis=-1): """DEPRECATED.""" return tf.argmin(x, axis) -@keras_core_export("keras_core._legacy.backend.arange") +@keras_export("keras._legacy.backend.arange") def arange(start, stop=None, step=1, dtype="int32"): """DEPRECATED.""" if stop is None and start < 0: @@ -55,7 +55,7 @@ def arange(start, stop=None, step=1, dtype="int32"): return result -@keras_core_export("keras_core._legacy.backend.batch_dot") +@keras_export("keras._legacy.backend.batch_dot") def batch_dot(x, y, axes=None): """DEPRECATED.""" x_shape = x.shape @@ -217,20 +217,20 @@ def batch_dot(x, y, axes=None): return result -@keras_core_export("keras_core._legacy.backend.batch_flatten") +@keras_export("keras._legacy.backend.batch_flatten") def batch_flatten(x): """DEPRECATED.""" x = tf.reshape(x, tf.stack([-1, prod(tf.shape(x)[1:])])) return x -@keras_core_export("keras_core._legacy.backend.batch_get_value") +@keras_export("keras._legacy.backend.batch_get_value") def batch_get_value(tensors): """DEPRECATED.""" return [x.numpy() for x in tensors] -@keras_core_export("keras_core._legacy.backend.batch_set_value") +@keras_export("keras._legacy.backend.batch_set_value") def batch_set_value(tuples): """DEPRECATED.""" if tf.executing_eagerly() or tf.inside_function(): @@ -239,13 +239,13 @@ def batch_set_value(tuples): x.assign(value) -@keras_core_export("keras_core._legacy.backend.batch_normalization") +@keras_export("keras._legacy.backend.batch_normalization") def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): """DEPRECATED.""" return 
tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) -@keras_core_export("keras_core._legacy.backend.bias_add") +@keras_export("keras._legacy.backend.bias_add") def bias_add(x, bias, data_format=None): """DEPRECATED.""" if data_format is None: @@ -271,7 +271,7 @@ def bias_add(x, bias, data_format=None): return tf.nn.bias_add(x, bias) -@keras_core_export("keras_core._legacy.backend.binary_crossentropy") +@keras_export("keras._legacy.backend.binary_crossentropy") def binary_crossentropy(target, output, from_logits=False): """DEPRECATED.""" target = tf.convert_to_tensor(target) @@ -291,7 +291,7 @@ def binary_crossentropy(target, output, from_logits=False): return -bce -@keras_core_export("keras_core._legacy.backend.binary_focal_crossentropy") +@keras_export("keras._legacy.backend.binary_focal_crossentropy") def binary_focal_crossentropy( target, output, @@ -323,13 +323,13 @@ def binary_focal_crossentropy( return focal_bce -@keras_core_export("keras_core._legacy.backend.cast") +@keras_export("keras._legacy.backend.cast") def cast(x, dtype): """DEPRECATED.""" return tf.cast(x, dtype) -@keras_core_export("keras_core._legacy.backend.cast_to_floatx") +@keras_export("keras._legacy.backend.cast_to_floatx") def cast_to_floatx(x): """DEPRECATED.""" if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)): @@ -337,7 +337,7 @@ def cast_to_floatx(x): return np.asarray(x, dtype=backend.floatx()) -@keras_core_export("keras_core._legacy.backend.categorical_crossentropy") +@keras_export("keras._legacy.backend.categorical_crossentropy") def categorical_crossentropy(target, output, from_logits=False, axis=-1): """DEPRECATED.""" target = tf.convert_to_tensor(target) @@ -361,7 +361,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): return -tf.reduce_sum(target * tf.math.log(output), axis) -@keras_core_export("keras_core._legacy.backend.categorical_focal_crossentropy") +@keras_export("keras._legacy.backend.categorical_focal_crossentropy") def 
categorical_focal_crossentropy( target, output, @@ -400,7 +400,7 @@ def categorical_focal_crossentropy( return focal_cce -@keras_core_export("keras_core._legacy.backend.clip") +@keras_export("keras._legacy.backend.clip") def clip(x, min_value, max_value): """DEPRECATED.""" if isinstance(min_value, (int, float)) and isinstance( @@ -415,7 +415,7 @@ def clip(x, min_value, max_value): return tf.clip_by_value(x, min_value, max_value) -@keras_core_export("keras_core._legacy.backend.concatenate") +@keras_export("keras._legacy.backend.concatenate") def concatenate(tensors, axis=-1): """DEPRECATED.""" if axis < 0: @@ -433,7 +433,7 @@ def concatenate(tensors, axis=-1): return tf.concat([to_dense(x) for x in tensors], axis) -@keras_core_export("keras_core._legacy.backend.constant") +@keras_export("keras._legacy.backend.constant") def constant(value, dtype=None, shape=None, name=None): """DEPRECATED.""" if dtype is None: @@ -476,7 +476,7 @@ def _preprocess_padding(padding): return padding -@keras_core_export("keras_core._legacy.backend.conv1d") +@keras_export("keras._legacy.backend.conv1d") def conv1d( x, kernel, strides=1, padding="valid", data_format=None, dilation_rate=1 ): @@ -508,7 +508,7 @@ def conv1d( return x -@keras_core_export("keras_core._legacy.backend.conv2d") +@keras_export("keras._legacy.backend.conv2d") def conv2d( x, kernel, @@ -538,7 +538,7 @@ def conv2d( return x -@keras_core_export("keras_core._legacy.backend.conv2d_transpose") +@keras_export("keras._legacy.backend.conv2d_transpose") def conv2d_transpose( x, kernel, @@ -607,7 +607,7 @@ def conv2d_transpose( return x -@keras_core_export("keras_core._legacy.backend.conv3d") +@keras_export("keras._legacy.backend.conv3d") def conv3d( x, kernel, @@ -637,19 +637,19 @@ def conv3d( return x -@keras_core_export("keras_core._legacy.backend.cos") +@keras_export("keras._legacy.backend.cos") def cos(x): """DEPRECATED.""" return tf.cos(x) -@keras_core_export("keras_core._legacy.backend.count_params") 
+@keras_export("keras._legacy.backend.count_params") def count_params(x): """DEPRECATED.""" return np.prod(x.shape.as_list()) -@keras_core_export("keras_core._legacy.backend.ctc_batch_cost") +@keras_export("keras._legacy.backend.ctc_batch_cost") def ctc_batch_cost(y_true, y_pred, input_length, label_length): """DEPRECATED.""" label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32) @@ -670,7 +670,7 @@ def ctc_batch_cost(y_true, y_pred, input_length, label_length): ) -@keras_core_export("keras_core._legacy.backend.ctc_label_dense_to_sparse") +@keras_export("keras._legacy.backend.ctc_label_dense_to_sparse") def ctc_label_dense_to_sparse(labels, label_lengths): """DEPRECATED.""" label_shape = tf.shape(labels) @@ -711,7 +711,7 @@ def ctc_label_dense_to_sparse(labels, label_lengths): ) -@keras_core_export("keras_core._legacy.backend.ctc_decode") +@keras_export("keras._legacy.backend.ctc_decode") def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): """DEPRECATED.""" input_shape = tf.shape(y_pred) @@ -739,19 +739,19 @@ def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): return (decoded_dense, log_prob) -@keras_core_export("keras_core._legacy.backend.cumsum") +@keras_export("keras._legacy.backend.cumsum") def cumsum(x, axis=0): """DEPRECATED.""" return tf.cumsum(x, axis=axis) -@keras_core_export("keras_core._legacy.backend.cumprod") +@keras_export("keras._legacy.backend.cumprod") def cumprod(x, axis=0): """DEPRECATED.""" return tf.math.cumprod(x, axis=axis) -@keras_core_export("keras_core._legacy.backend.depthwise_conv2d") +@keras_export("keras._legacy.backend.depthwise_conv2d") def depthwise_conv2d( x, depthwise_kernel, @@ -786,7 +786,7 @@ def depthwise_conv2d( return x -@keras_core_export("keras_core._legacy.backend.dot") +@keras_export("keras._legacy.backend.dot") def dot(x, y): """DEPRECATED.""" if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): @@ -818,7 +818,7 @@ def dot(x, y): return out 
-@keras_core_export("keras_core._legacy.backend.dropout") +@keras_export("keras._legacy.backend.dropout") def dropout(x, level, noise_shape=None, seed=None): """DEPRECATED.""" if seed is None: @@ -826,13 +826,13 @@ def dropout(x, level, noise_shape=None, seed=None): return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed) -@keras_core_export("keras_core._legacy.backend.dtype") +@keras_export("keras._legacy.backend.dtype") def dtype(x): """DEPRECATED.""" return x.dtype.base_dtype.name -@keras_core_export("keras_core._legacy.backend.elu") +@keras_export("keras._legacy.backend.elu") def elu(x, alpha=1.0): """DEPRECATED.""" res = tf.nn.elu(x) @@ -842,31 +842,31 @@ def elu(x, alpha=1.0): return tf.where(x > 0, res, alpha * res) -@keras_core_export("keras_core._legacy.backend.equal") +@keras_export("keras._legacy.backend.equal") def equal(x, y): """DEPRECATED.""" return tf.equal(x, y) -@keras_core_export("keras_core._legacy.backend.eval") +@keras_export("keras._legacy.backend.eval") def eval(x): """DEPRECATED.""" return get_value(to_dense(x)) -@keras_core_export("keras_core._legacy.backend.exp") +@keras_export("keras._legacy.backend.exp") def exp(x): """DEPRECATED.""" return tf.exp(x) -@keras_core_export("keras_core._legacy.backend.expand_dims") +@keras_export("keras._legacy.backend.expand_dims") def expand_dims(x, axis=-1): """DEPRECATED.""" return tf.expand_dims(x, axis) -@keras_core_export("keras_core._legacy.backend.eye") +@keras_export("keras._legacy.backend.eye") def eye(size, dtype=None, name=None): """DEPRECATED.""" if dtype is None: @@ -875,31 +875,31 @@ def eye(size, dtype=None, name=None): return variable(tf.eye(size, dtype=tf_dtype), dtype, name) -@keras_core_export("keras_core._legacy.backend.flatten") +@keras_export("keras._legacy.backend.flatten") def flatten(x): """DEPRECATED.""" return tf.reshape(x, [-1]) -@keras_core_export("keras_core._legacy.backend.foldl") +@keras_export("keras._legacy.backend.foldl") def foldl(fn, elems, 
initializer=None, name=None): """DEPRECATED.""" return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name) -@keras_core_export("keras_core._legacy.backend.foldr") +@keras_export("keras._legacy.backend.foldr") def foldr(fn, elems, initializer=None, name=None): """DEPRECATED.""" return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name) -@keras_core_export("keras_core._legacy.backend.gather") +@keras_export("keras._legacy.backend.gather") def gather(reference, indices): """DEPRECATED.""" return tf.compat.v1.gather(reference, indices) -@keras_core_export("keras_core._legacy.backend.get_value") +@keras_export("keras._legacy.backend.get_value") def get_value(x): """DEPRECATED.""" if not tf.is_tensor(x): @@ -915,7 +915,7 @@ def get_value(x): return x.numpy() -@keras_core_export("keras_core._legacy.backend.gradients") +@keras_export("keras._legacy.backend.gradients") def gradients(loss, variables): """DEPRECATED.""" return tf.compat.v1.gradients( @@ -923,19 +923,19 @@ def gradients(loss, variables): ) -@keras_core_export("keras_core._legacy.backend.greater") +@keras_export("keras._legacy.backend.greater") def greater(x, y): """DEPRECATED.""" return tf.greater(x, y) -@keras_core_export("keras_core._legacy.backend.greater_equal") +@keras_export("keras._legacy.backend.greater_equal") def greater_equal(x, y): """DEPRECATED.""" return tf.greater_equal(x, y) -@keras_core_export("keras_core._legacy.backend.hard_sigmoid") +@keras_export("keras._legacy.backend.hard_sigmoid") def hard_sigmoid(x): """DEPRECATED.""" point_two = tf.convert_to_tensor(0.2, dtype=x.dtype) @@ -946,13 +946,13 @@ def hard_sigmoid(x): return x -@keras_core_export("keras_core._legacy.backend.in_top_k") +@keras_export("keras._legacy.backend.in_top_k") def in_top_k(predictions, targets, k): """DEPRECATED.""" return tf.compat.v1.math.in_top_k(predictions, targets, k) -@keras_core_export("keras_core._legacy.backend.int_shape") +@keras_export("keras._legacy.backend.int_shape") def 
int_shape(x): """DEPRECATED.""" try: @@ -964,7 +964,7 @@ def int_shape(x): return None -@keras_core_export("keras_core._legacy.backend.is_sparse") +@keras_export("keras._legacy.backend.is_sparse") def is_sparse(tensor): """DEPRECATED.""" spec = getattr(tensor, "_type_spec", None) @@ -973,49 +973,49 @@ def is_sparse(tensor): return isinstance(tensor, tf.SparseTensor) -@keras_core_export("keras_core._legacy.backend.l2_normalize") +@keras_export("keras._legacy.backend.l2_normalize") def l2_normalize(x, axis=None): """DEPRECATED.""" return tf.linalg.l2_normalize(x, axis=axis) -@keras_core_export("keras_core._legacy.backend.less") +@keras_export("keras._legacy.backend.less") def less(x, y): """DEPRECATED.""" return tf.less(x, y) -@keras_core_export("keras_core._legacy.backend.less_equal") +@keras_export("keras._legacy.backend.less_equal") def less_equal(x, y): """DEPRECATED.""" return tf.less_equal(x, y) -@keras_core_export("keras_core._legacy.backend.log") +@keras_export("keras._legacy.backend.log") def log(x): """DEPRECATED.""" return tf.math.log(x) -@keras_core_export("keras_core._legacy.backend.map_fn") +@keras_export("keras._legacy.backend.map_fn") def map_fn(fn, elems, name=None, dtype=None): """DEPRECATED.""" return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype) -@keras_core_export("keras_core._legacy.backend.max") +@keras_export("keras._legacy.backend.max") def max(x, axis=None, keepdims=False): """DEPRECATED.""" return tf.reduce_max(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.maximum") +@keras_export("keras._legacy.backend.maximum") def maximum(x, y): """DEPRECATED.""" return tf.maximum(x, y) -@keras_core_export("keras_core._legacy.backend.mean") +@keras_export("keras._legacy.backend.mean") def mean(x, axis=None, keepdims=False): """DEPRECATED.""" if x.dtype.base_dtype == tf.bool: @@ -1023,19 +1023,19 @@ def mean(x, axis=None, keepdims=False): return tf.reduce_mean(x, axis, keepdims) 
-@keras_core_export("keras_core._legacy.backend.min") +@keras_export("keras._legacy.backend.min") def min(x, axis=None, keepdims=False): """DEPRECATED.""" return tf.reduce_min(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.minimum") +@keras_export("keras._legacy.backend.minimum") def minimum(x, y): """DEPRECATED.""" return tf.minimum(x, y) -@keras_core_export("keras_core._legacy.backend.moving_average_update") +@keras_export("keras._legacy.backend.moving_average_update") def moving_average_update(x, value, momentum): """DEPRECATED.""" momentum = tf.cast(momentum, x.dtype) @@ -1043,31 +1043,31 @@ def moving_average_update(x, value, momentum): return x.assign_sub((x - value) * (1 - momentum)) -@keras_core_export("keras_core._legacy.backend.name_scope") +@keras_export("keras._legacy.backend.name_scope") def name_scope(name): """DEPRECATED.""" return tf.name_scope(name) -@keras_core_export("keras_core._legacy.backend.ndim") +@keras_export("keras._legacy.backend.ndim") def ndim(x): """DEPRECATED.""" return x.shape.rank -@keras_core_export("keras_core._legacy.backend.not_equal") +@keras_export("keras._legacy.backend.not_equal") def not_equal(x, y): """DEPRECATED.""" return tf.not_equal(x, y) -@keras_core_export("keras_core._legacy.backend.one_hot") +@keras_export("keras._legacy.backend.one_hot") def one_hot(indices, num_classes): """DEPRECATED.""" return tf.one_hot(indices, depth=num_classes, axis=-1) -@keras_core_export("keras_core._legacy.backend.ones") +@keras_export("keras._legacy.backend.ones") def ones(shape, dtype=None, name=None): """DEPRECATED.""" with tf.init_scope(): @@ -1080,19 +1080,19 @@ def ones(shape, dtype=None, name=None): return v -@keras_core_export("keras_core._legacy.backend.ones_like") +@keras_export("keras._legacy.backend.ones_like") def ones_like(x, dtype=None, name=None): """DEPRECATED.""" return tf.ones_like(x, dtype=dtype, name=name) -@keras_core_export("keras_core._legacy.backend.permute_dimensions") 
+@keras_export("keras._legacy.backend.permute_dimensions") def permute_dimensions(x, pattern): """DEPRECATED.""" return tf.transpose(x, perm=pattern) -@keras_core_export("keras_core._legacy.backend.pool2d") +@keras_export("keras._legacy.backend.pool2d") def pool2d( x, pool_size, @@ -1136,7 +1136,7 @@ def pool2d( return x -@keras_core_export("keras_core._legacy.backend.pool3d") +@keras_export("keras._legacy.backend.pool3d") def pool3d( x, pool_size, @@ -1176,19 +1176,19 @@ def pool3d( return x -@keras_core_export("keras_core._legacy.backend.pow") +@keras_export("keras._legacy.backend.pow") def pow(x, a): """DEPRECATED.""" return tf.pow(x, a) -@keras_core_export("keras_core._legacy.backend.prod") +@keras_export("keras._legacy.backend.prod") def prod(x, axis=None, keepdims=False): """DEPRECATED.""" return tf.reduce_prod(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.random_bernoulli") +@keras_export("keras._legacy.backend.random_bernoulli") def random_bernoulli(shape, p=0.0, dtype=None, seed=None): """DEPRECATED.""" if dtype is None: @@ -1202,7 +1202,7 @@ def random_bernoulli(shape, p=0.0, dtype=None, seed=None): ) -@keras_core_export("keras_core._legacy.backend.random_normal") +@keras_export("keras._legacy.backend.random_normal") def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """DEPRECATED.""" if dtype is None: @@ -1214,7 +1214,7 @@ def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): ) -@keras_core_export("keras_core._legacy.backend.random_normal_variable") +@keras_export("keras._legacy.backend.random_normal_variable") def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None ): @@ -1231,7 +1231,7 @@ def random_normal_variable( return variable(value, dtype=dtype, name=name) -@keras_core_export("keras_core._legacy.backend.random_uniform") +@keras_export("keras._legacy.backend.random_uniform") def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """DEPRECATED.""" 
if dtype is None: @@ -1243,7 +1243,7 @@ def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): ) -@keras_core_export("keras_core._legacy.backend.random_uniform_variable") +@keras_export("keras._legacy.backend.random_uniform_variable") def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): """DEPRECATED.""" if dtype is None: @@ -1258,13 +1258,13 @@ def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): return variable(value, dtype=dtype, name=name) -@keras_core_export("keras_core._legacy.backend.reshape") +@keras_export("keras._legacy.backend.reshape") def reshape(x, shape): """DEPRECATED.""" return tf.reshape(x, shape) -@keras_core_export("keras_core._legacy.backend.relu") +@keras_export("keras._legacy.backend.relu") def relu(x, alpha=0.0, max_value=None, threshold=0.0): """DEPRECATED.""" # While x can be a tensor or variable, we also see cases where @@ -1303,7 +1303,7 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0): return x -@keras_core_export("keras_core._legacy.backend.repeat") +@keras_export("keras._legacy.backend.repeat") def repeat(x, n): """DEPRECATED.""" assert ndim(x) == 2 @@ -1312,7 +1312,7 @@ def repeat(x, n): return tf.tile(x, pattern) -@keras_core_export("keras_core._legacy.backend.repeat_elements") +@keras_export("keras._legacy.backend.repeat_elements") def repeat_elements(x, rep, axis): """DEPRECATED.""" x_shape = x.shape.as_list() @@ -1350,7 +1350,7 @@ def repeat_elements(x, rep, axis): return x_rep -@keras_core_export("keras_core._legacy.backend.resize_images") +@keras_export("keras._legacy.backend.resize_images") def resize_images( x, height_factor, width_factor, data_format, interpolation="nearest" ): @@ -1397,7 +1397,7 @@ def resize_images( return x -@keras_core_export("keras_core._legacy.backend.resize_volumes") +@keras_export("keras._legacy.backend.resize_volumes") def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): """DEPRECATED.""" if 
data_format == "channels_first": @@ -1414,7 +1414,7 @@ def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): raise ValueError(f"Invalid data_format: {data_format}") -@keras_core_export("keras_core._legacy.backend.reverse") +@keras_export("keras._legacy.backend.reverse") def reverse(x, axes): """DEPRECATED.""" if isinstance(axes, int): @@ -1422,7 +1422,7 @@ def reverse(x, axes): return tf.reverse(x, axes) -@keras_core_export("keras_core._legacy.backend.rnn") +@keras_export("keras._legacy.backend.rnn") def rnn( step_function, inputs, @@ -1828,13 +1828,13 @@ def rnn( return last_output, outputs, new_states -@keras_core_export("keras_core._legacy.backend.round") +@keras_export("keras._legacy.backend.round") def round(x): """DEPRECATED.""" return tf.round(x) -@keras_core_export("keras_core._legacy.backend.separable_conv2d") +@keras_export("keras._legacy.backend.separable_conv2d") def separable_conv2d( x, depthwise_kernel, @@ -1875,39 +1875,39 @@ def separable_conv2d( return x -@keras_core_export("keras_core._legacy.backend.set_value") +@keras_export("keras._legacy.backend.set_value") def set_value(x, value): """DEPRECATED.""" value = np.asarray(value, dtype=x.dtype.name) x.assign(value) -@keras_core_export("keras_core._legacy.backend.shape") +@keras_export("keras._legacy.backend.shape") def shape(x): """DEPRECATED.""" return tf.shape(x) -@keras_core_export("keras_core._legacy.backend.sigmoid") +@keras_export("keras._legacy.backend.sigmoid") def sigmoid(x): """DEPRECATED.""" output = tf.sigmoid(x) return output -@keras_core_export("keras_core._legacy.backend.sign") +@keras_export("keras._legacy.backend.sign") def sign(x): """DEPRECATED.""" return tf.sign(x) -@keras_core_export("keras_core._legacy.backend.sin") +@keras_export("keras._legacy.backend.sin") def sin(x): """DEPRECATED.""" return tf.sin(x) -@keras_core_export("keras_core._legacy.backend.softmax") +@keras_export("keras._legacy.backend.softmax") def softmax(x, axis=-1): """DEPRECATED.""" 
if x.shape.rank <= 1: @@ -1928,19 +1928,19 @@ def softmax(x, axis=-1): return output -@keras_core_export("keras_core._legacy.backend.softplus") +@keras_export("keras._legacy.backend.softplus") def softplus(x): """DEPRECATED.""" return tf.math.softplus(x) -@keras_core_export("keras_core._legacy.backend.softsign") +@keras_export("keras._legacy.backend.softsign") def softsign(x): """DEPRECATED.""" return tf.math.softsign(x) -@keras_core_export("keras_core._legacy.backend.sparse_categorical_crossentropy") +@keras_export("keras._legacy.backend.sparse_categorical_crossentropy") def sparse_categorical_crossentropy( target, output, from_logits=False, axis=-1, ignore_class=None ): @@ -2013,7 +2013,7 @@ def sparse_categorical_crossentropy( return res -@keras_core_export("keras_core._legacy.backend.spatial_2d_padding") +@keras_export("keras._legacy.backend.spatial_2d_padding") def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): """DEPRECATED.""" assert len(padding) == 2 @@ -2031,7 +2031,7 @@ def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): return tf.compat.v1.pad(x, pattern) -@keras_core_export("keras_core._legacy.backend.spatial_3d_padding") +@keras_export("keras._legacy.backend.spatial_3d_padding") def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): """DEPRECATED.""" assert len(padding) == 3 @@ -2062,7 +2062,7 @@ def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): return tf.compat.v1.pad(x, pattern) -@keras_core_export("keras_core._legacy.backend.sqrt") +@keras_export("keras._legacy.backend.sqrt") def sqrt(x): """DEPRECATED.""" zero = tf.convert_to_tensor(0.0, x.dtype) @@ -2070,25 +2070,25 @@ def sqrt(x): return tf.sqrt(x) -@keras_core_export("keras_core._legacy.backend.square") +@keras_export("keras._legacy.backend.square") def square(x): """DEPRECATED.""" return tf.square(x) -@keras_core_export("keras_core._legacy.backend.squeeze") +@keras_export("keras._legacy.backend.squeeze") 
def squeeze(x, axis): """DEPRECATED.""" return tf.squeeze(x, [axis]) -@keras_core_export("keras_core._legacy.backend.stack") +@keras_export("keras._legacy.backend.stack") def stack(x, axis=0): """DEPRECATED.""" return tf.stack(x, axis=axis) -@keras_core_export("keras_core._legacy.backend.std") +@keras_export("keras._legacy.backend.std") def std(x, axis=None, keepdims=False): """DEPRECATED.""" if x.dtype.base_dtype == tf.bool: @@ -2096,7 +2096,7 @@ def std(x, axis=None, keepdims=False): return tf.math.reduce_std(x, axis=axis, keepdims=keepdims) -@keras_core_export("keras_core._legacy.backend.stop_gradient") +@keras_export("keras._legacy.backend.stop_gradient") def stop_gradient(variables): """DEPRECATED.""" if isinstance(variables, (list, tuple)): @@ -2104,13 +2104,13 @@ def stop_gradient(variables): return tf.stop_gradient(variables) -@keras_core_export("keras_core._legacy.backend.sum") +@keras_export("keras._legacy.backend.sum") def sum(x, axis=None, keepdims=False): """DEPRECATED.""" return tf.reduce_sum(x, axis, keepdims) -@keras_core_export("keras_core._legacy.backend.switch") +@keras_export("keras._legacy.backend.switch") def switch(condition, then_expression, else_expression): """DEPRECATED.""" if condition.dtype != tf.bool: @@ -2166,13 +2166,13 @@ def switch(condition, then_expression, else_expression): return x -@keras_core_export("keras_core._legacy.backend.tanh") +@keras_export("keras._legacy.backend.tanh") def tanh(x): """DEPRECATED.""" return tf.tanh(x) -@keras_core_export("keras_core._legacy.backend.temporal_padding") +@keras_export("keras._legacy.backend.temporal_padding") def temporal_padding(x, padding=(1, 1)): """DEPRECATED.""" assert len(padding) == 2 @@ -2180,7 +2180,7 @@ def temporal_padding(x, padding=(1, 1)): return tf.compat.v1.pad(x, pattern) -@keras_core_export("keras_core._legacy.backend.tile") +@keras_export("keras._legacy.backend.tile") def tile(x, n): """DEPRECATED.""" if isinstance(n, int): @@ -2188,7 +2188,7 @@ def tile(x, n): return 
tf.tile(x, n) -@keras_core_export("keras_core._legacy.backend.to_dense") +@keras_export("keras._legacy.backend.to_dense") def to_dense(tensor): """DEPRECATED.""" if is_sparse(tensor): @@ -2197,13 +2197,13 @@ def to_dense(tensor): return tensor -@keras_core_export("keras_core._legacy.backend.transpose") +@keras_export("keras._legacy.backend.transpose") def transpose(x): """DEPRECATED.""" return tf.transpose(x) -@keras_core_export("keras_core._legacy.backend.truncated_normal") +@keras_export("keras._legacy.backend.truncated_normal") def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """DEPRECATED.""" if dtype is None: @@ -2215,25 +2215,25 @@ def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): ) -@keras_core_export("keras_core._legacy.backend.update") +@keras_export("keras._legacy.backend.update") def update(x, new_x): """DEPRECATED.""" return tf.compat.v1.assign(x, new_x) -@keras_core_export("keras_core._legacy.backend.update_add") +@keras_export("keras._legacy.backend.update_add") def update_add(x, increment): """DEPRECATED.""" return tf.compat.v1.assign_add(x, increment) -@keras_core_export("keras_core._legacy.backend.update_sub") +@keras_export("keras._legacy.backend.update_sub") def update_sub(x, decrement): """DEPRECATED.""" return tf.compat.v1.assign_sub(x, decrement) -@keras_core_export("keras_core._legacy.backend.var") +@keras_export("keras._legacy.backend.var") def var(x, axis=None, keepdims=False): """DEPRECATED.""" if x.dtype.base_dtype == tf.bool: @@ -2241,7 +2241,7 @@ def var(x, axis=None, keepdims=False): return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims) -@keras_core_export("keras_core._legacy.backend.variable") +@keras_export("keras._legacy.backend.variable") def variable(value, dtype=None, name=None, constraint=None): """DEPRECATED.""" if dtype is None: @@ -2268,7 +2268,7 @@ def variable(value, dtype=None, name=None, constraint=None): return v 
-@keras_core_export("keras_core._legacy.backend.zeros") +@keras_export("keras._legacy.backend.zeros") def zeros(shape, dtype=None, name=None): """DEPRECATED.""" with tf.init_scope(): @@ -2281,7 +2281,7 @@ def zeros(shape, dtype=None, name=None): return v -@keras_core_export("keras_core._legacy.backend.zeros_like") +@keras_export("keras._legacy.backend.zeros_like") def zeros_like(x, dtype=None, name=None): """DEPRECATED.""" return tf.zeros_like(x, dtype=dtype, name=name) diff --git a/keras_core/legacy/layers.py b/keras/legacy/layers.py similarity index 94% rename from keras_core/legacy/layers.py rename to keras/legacy/layers.py index bc7114929..2ceaa151f 100644 --- a/keras_core/legacy/layers.py +++ b/keras/legacy/layers.py @@ -6,13 +6,13 @@ RandomWidth ThresholdedReLU """ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core._legacy.layers.AlphaDropout") +@keras_export("keras._legacy.layers.AlphaDropout") class AlphaDropout(Layer): """DEPRECATED.""" @@ -62,7 +62,7 @@ class AlphaDropout(Layer): return input_shape -@keras_core_export("keras_core._legacy.layers.RandomHeight") +@keras_export("keras._legacy.layers.RandomHeight") class RandomHeight(Layer): """DEPRECATED.""" @@ -138,7 +138,7 @@ class RandomHeight(Layer): return {**base_config, **config} -@keras_core_export("keras_core._legacy.layers.RandomWidth") +@keras_export("keras._legacy.layers.RandomWidth") class RandomWidth(Layer): """DEPRECATED.""" @@ -213,7 +213,7 @@ class RandomWidth(Layer): return {**base_config, **config} -@keras_core_export("keras_core._legacy.layers.ThresholdedReLU") +@keras_export("keras._legacy.layers.ThresholdedReLU") class ThresholdedReLU(Layer): """DEPRECATED.""" 
diff --git a/keras_core/legacy/losses.py b/keras/legacy/losses.py similarity index 79% rename from keras_core/legacy/losses.py rename to keras/legacy/losses.py index 8e0d25fd8..f0cd872bc 100644 --- a/keras_core/legacy/losses.py +++ b/keras/legacy/losses.py @@ -1,7 +1,7 @@ -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export -@keras_core_export("keras_core._legacy.losses.Reduction") +@keras_export("keras._legacy.losses.Reduction") class Reduction: AUTO = "auto" NONE = "none" diff --git a/keras_core/legacy/preprocessing/__init__.py b/keras/legacy/preprocessing/__init__.py similarity index 100% rename from keras_core/legacy/preprocessing/__init__.py rename to keras/legacy/preprocessing/__init__.py diff --git a/keras_core/legacy/preprocessing/image.py b/keras/legacy/preprocessing/image.py similarity index 98% rename from keras_core/legacy/preprocessing/image.py rename to keras/legacy/preprocessing/image.py index 0d5913c38..4ce3b7a57 100644 --- a/keras_core/legacy/preprocessing/image.py +++ b/keras/legacy/preprocessing/image.py @@ -8,15 +8,15 @@ import warnings import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.trainers.data_adapters.py_dataset_adapter import PyDataset -from keras_core.utils import image_utils -from keras_core.utils import io_utils -from keras_core.utils.module_utils import scipy +from keras import backend +from keras.api_export import keras_export +from keras.trainers.data_adapters.py_dataset_adapter import PyDataset +from keras.utils import image_utils +from keras.utils import io_utils +from keras.utils.module_utils import scipy -@keras_core_export("keras_core._legacy.preprocessing.image.Iterator") +@keras_export("keras._legacy.preprocessing.image.Iterator") class Iterator(PyDataset): """Base class for image data iterators. 
@@ -389,7 +389,7 @@ class BatchFromFilesMixin: ) -@keras_core_export("keras_core._legacy.preprocessing.image.DirectoryIterator") +@keras_export("keras._legacy.preprocessing.image.DirectoryIterator") class DirectoryIterator(BatchFromFilesMixin, Iterator): """Iterator capable of reading images from a directory on disk. @@ -512,7 +512,7 @@ class DirectoryIterator(BatchFromFilesMixin, Iterator): return None -@keras_core_export("keras_core._legacy.preprocessing.image.NumpyArrayIterator") +@keras_export("keras._legacy.preprocessing.image.NumpyArrayIterator") class NumpyArrayIterator(Iterator): """Iterator yielding data from a Numpy array. @@ -946,7 +946,7 @@ def flip_axis(x, axis): return x -@keras_core_export("keras_core._legacy.preprocessing.image.ImageDataGenerator") +@keras_export("keras._legacy.preprocessing.image.ImageDataGenerator") class ImageDataGenerator: """DEPRECATED.""" @@ -1546,7 +1546,7 @@ class ImageDataGenerator: self.zca_whitening_matrix = (u * s_inv).dot(u.T) -@keras_core_export("keras_core._legacy.preprocessing.image.random_rotation") +@keras_export("keras._legacy.preprocessing.image.random_rotation") def random_rotation( x, rg, @@ -1572,7 +1572,7 @@ def random_rotation( return x -@keras_core_export("keras_core._legacy.preprocessing.image.random_shift") +@keras_export("keras._legacy.preprocessing.image.random_shift") def random_shift( x, wrg, @@ -1602,7 +1602,7 @@ def random_shift( return x -@keras_core_export("keras_core._legacy.preprocessing.image.random_shear") +@keras_export("keras._legacy.preprocessing.image.random_shear") def random_shear( x, intensity, @@ -1628,7 +1628,7 @@ def random_shear( return x -@keras_core_export("keras_core._legacy.preprocessing.image.random_zoom") +@keras_export("keras._legacy.preprocessing.image.random_zoom") def random_zoom( x, zoom_range, @@ -1664,7 +1664,7 @@ def random_zoom( return x -@keras_core_export("keras_core._legacy.preprocessing.image.apply_channel_shift") 
+@keras_export("keras._legacy.preprocessing.image.apply_channel_shift") def apply_channel_shift(x, intensity, channel_axis=0): """Performs a channel shift. @@ -1688,8 +1688,8 @@ def apply_channel_shift(x, intensity, channel_axis=0): return x -@keras_core_export( - "keras_core._legacy.preprocessing.image.random_channel_shift" +@keras_export( + "keras._legacy.preprocessing.image.random_channel_shift" ) def random_channel_shift(x, intensity_range, channel_axis=0): """Performs a random channel shift. @@ -1708,8 +1708,8 @@ def random_channel_shift(x, intensity_range, channel_axis=0): return apply_channel_shift(x, intensity, channel_axis=channel_axis) -@keras_core_export( - "keras_core._legacy.preprocessing.image.apply_brightness_shift" +@keras_export( + "keras._legacy.preprocessing.image.apply_brightness_shift" ) def apply_brightness_shift(x, brightness, scale=True): """Performs a brightness shift. @@ -1741,7 +1741,7 @@ def apply_brightness_shift(x, brightness, scale=True): return x -@keras_core_export("keras_core._legacy.preprocessing.image.random_brightness") +@keras_export("keras._legacy.preprocessing.image.random_brightness") def random_brightness(x, brightness_range, scale=True): """Performs a random brightness shift. 
@@ -1778,8 +1778,8 @@ def transform_matrix_offset_center(matrix, x, y): return transform_matrix -@keras_core_export( - "keras_core._legacy.preprocessing.image.apply_affine_transform" +@keras_export( + "keras._legacy.preprocessing.image.apply_affine_transform" ) def apply_affine_transform( x, diff --git a/keras_core/legacy/preprocessing/sequence.py b/keras/legacy/preprocessing/sequence.py similarity index 96% rename from keras_core/legacy/preprocessing/sequence.py rename to keras/legacy/preprocessing/sequence.py index 92e2f853b..d7932dbec 100644 --- a/keras_core/legacy/preprocessing/sequence.py +++ b/keras/legacy/preprocessing/sequence.py @@ -5,12 +5,12 @@ import random import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.trainers.data_adapters.py_dataset_adapter import PyDataset +from keras.api_export import keras_export +from keras.trainers.data_adapters.py_dataset_adapter import PyDataset -@keras_core_export( - "keras_core._legacy.preprocessing.sequence.TimeseriesGenerator" +@keras_export( + "keras._legacy.preprocessing.sequence.TimeseriesGenerator" ) class TimeseriesGenerator(PyDataset): """Utility class for generating batches of temporal data. @@ -178,8 +178,8 @@ class TimeseriesGenerator(PyDataset): return json.dumps(timeseries_generator_config, **kwargs) -@keras_core_export( - "keras_core._legacy.preprocessing.sequence.make_sampling_table" +@keras_export( + "keras._legacy.preprocessing.sequence.make_sampling_table" ) def make_sampling_table(size, sampling_factor=1e-5): """Generates a word rank-based probabilistic sampling table. 
@@ -222,7 +222,7 @@ def make_sampling_table(size, sampling_factor=1e-5): return np.minimum(1.0, f / np.sqrt(f)) -@keras_core_export("keras_core._legacy.preprocessing.sequence.skipgrams") +@keras_export("keras._legacy.preprocessing.sequence.skipgrams") def skipgrams( sequence, vocabulary_size, diff --git a/keras_core/legacy/preprocessing/text.py b/keras/legacy/preprocessing/text.py similarity index 96% rename from keras_core/legacy/preprocessing/text.py rename to keras/legacy/preprocessing/text.py index f300953ca..f61fd98da 100644 --- a/keras_core/legacy/preprocessing/text.py +++ b/keras/legacy/preprocessing/text.py @@ -7,11 +7,11 @@ import warnings import numpy as np -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export -@keras_core_export( - "keras_core._legacy.preprocessing.text.text_to_word_sequence" +@keras_export( + "keras._legacy.preprocessing.text.text_to_word_sequence" ) def text_to_word_sequence( input_text, @@ -31,7 +31,7 @@ def text_to_word_sequence( return [i for i in seq if i] -@keras_core_export("keras_core._legacy.preprocessing.text.one_hot") +@keras_export("keras._legacy.preprocessing.text.one_hot") def one_hot( input_text, n, @@ -52,7 +52,7 @@ def one_hot( ) -@keras_core_export("keras_core._legacy.preprocessing.text.hashing_trick") +@keras_export("keras._legacy.preprocessing.text.hashing_trick") def hashing_trick( text, n, @@ -80,7 +80,7 @@ def hashing_trick( return [(hash_function(w) % (n - 1) + 1) for w in seq] -@keras_core_export("keras_core._legacy.preprocessing.text.Tokenizer") +@keras_export("keras._legacy.preprocessing.text.Tokenizer") class Tokenizer(object): """DEPRECATED.""" @@ -314,7 +314,7 @@ class Tokenizer(object): return json.dumps(tokenizer_config, **kwargs) -@keras_core_export("keras_core._legacy.preprocessing.text.tokenizer_from_json") +@keras_export("keras._legacy.preprocessing.text.tokenizer_from_json") def tokenizer_from_json(json_string): """DEPRECATED.""" tokenizer_config = 
json.loads(json_string) diff --git a/keras_core/legacy/saving/__init__.py b/keras/legacy/saving/__init__.py similarity index 100% rename from keras_core/legacy/saving/__init__.py rename to keras/legacy/saving/__init__.py diff --git a/keras_core/legacy/saving/json_utils.py b/keras/legacy/saving/json_utils.py similarity index 97% rename from keras_core/legacy/saving/json_utils.py rename to keras/legacy/saving/json_utils.py index b91fdd508..0a7f6254f 100644 --- a/keras_core/legacy/saving/json_utils.py +++ b/keras/legacy/saving/json_utils.py @@ -7,9 +7,9 @@ import json import numpy as np -from keras_core.legacy.saving import serialization -from keras_core.saving import serialization_lib -from keras_core.utils.module_utils import tensorflow as tf +from keras.legacy.saving import serialization +from keras.saving import serialization_lib +from keras.utils.module_utils import tensorflow as tf _EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC" diff --git a/keras_core/legacy/saving/json_utils_test.py b/keras/legacy/saving/json_utils_test.py similarity index 96% rename from keras_core/legacy/saving/json_utils_test.py rename to keras/legacy/saving/json_utils_test.py index 509269536..8d82f52a4 100644 --- a/keras_core/legacy/saving/json_utils_test.py +++ b/keras/legacy/saving/json_utils_test.py @@ -2,9 +2,9 @@ import enum import pytest -from keras_core import backend -from keras_core import testing -from keras_core.legacy.saving import json_utils +from keras import backend +from keras import testing +from keras.legacy.saving import json_utils if backend.backend() == "tensorflow": import tensorflow as tf diff --git a/keras_core/legacy/saving/legacy_h5_format.py b/keras/legacy/saving/legacy_h5_format.py similarity index 98% rename from keras_core/legacy/saving/legacy_h5_format.py rename to keras/legacy/saving/legacy_h5_format.py index 863f8c7ac..f681f85c3 100644 --- a/keras_core/legacy/saving/legacy_h5_format.py +++ b/keras/legacy/saving/legacy_h5_format.py @@ -5,14 +5,14 @@ import 
warnings import numpy as np from absl import logging -from keras_core import backend -from keras_core import optimizers -from keras_core.backend.common import global_state -from keras_core.legacy.saving import json_utils -from keras_core.legacy.saving import saving_options -from keras_core.legacy.saving import saving_utils -from keras_core.saving import object_registration -from keras_core.utils import io_utils +from keras import backend +from keras import optimizers +from keras.backend.common import global_state +from keras.legacy.saving import json_utils +from keras.legacy.saving import saving_options +from keras.legacy.saving import saving_utils +from keras.saving import object_registration +from keras.utils import io_utils try: import h5py @@ -200,7 +200,7 @@ def save_weights_to_hdf5_group(f, model): f: HDF5 group. model: Model instance. """ - from keras_core import __version__ as keras_version + from keras import __version__ as keras_version save_attributes_to_hdf5_group( f, "layer_names", [layer.name.encode("utf8") for layer in model.layers] diff --git a/keras_core/legacy/saving/legacy_h5_format_test.py b/keras/legacy/saving/legacy_h5_format_test.py similarity index 95% rename from keras_core/legacy/saving/legacy_h5_format_test.py rename to keras/legacy/saving/legacy_h5_format_test.py index dafb2f0d5..d6bd82d92 100644 --- a/keras_core/legacy/saving/legacy_h5_format_test.py +++ b/keras/legacy/saving/legacy_h5_format_test.py @@ -3,14 +3,14 @@ import os import numpy as np import pytest -import keras_core -from keras_core import layers -from keras_core import models -from keras_core import ops -from keras_core import testing -from keras_core.legacy.saving import legacy_h5_format -from keras_core.saving import object_registration -from keras_core.saving import serialization_lib +import keras +from keras import layers +from keras import models +from keras import ops +from keras import testing +from keras.legacy.saving import legacy_h5_format +from keras.saving 
import object_registration +from keras.saving import serialization_lib # TODO: more thorough testing. Correctness depends # on exact weight ordering for each layer, so we need @@ -78,19 +78,19 @@ class LegacyH5WeightsTest(testing.TestCase): self.assertAllClose(ref_output, output, atol=1e-5) def DISABLED_test_sequential_model_weights(self): - model = get_sequential_model(keras_core) + model = get_sequential_model(keras) tf_keras_model = get_sequential_model(tf_keras) ref_input = np.random.random((2, 3)) self._check_reloading_weights(ref_input, model, tf_keras_model) def DISABLED_test_functional_model_weights(self): - model = get_functional_model(keras_core) + model = get_functional_model(keras) tf_keras_model = get_functional_model(tf_keras) ref_input = np.random.random((2, 3)) self._check_reloading_weights(ref_input, model, tf_keras_model) def DISABLED_test_subclassed_model_weights(self): - model = get_subclassed_model(keras_core) + model = get_subclassed_model(keras) tf_keras_model = get_subclassed_model(tf_keras) ref_input = np.random.random((2, 3)) self._check_reloading_weights(ref_input, model, tf_keras_model) @@ -108,12 +108,12 @@ class LegacyH5WholeModelTest(testing.TestCase): self.assertAllClose(ref_output, output, atol=1e-5) def DISABLED_test_sequential_model(self): - model = get_sequential_model(keras_core) + model = get_sequential_model(keras) ref_input = np.random.random((2, 3)) self._check_reloading_model(ref_input, model) def DISABLED_test_functional_model(self): - model = get_functional_model(keras_core) + model = get_functional_model(keras) ref_input = np.random.random((2, 3)) self._check_reloading_model(ref_input, model) @@ -233,7 +233,7 @@ class LegacyH5WholeModelTest(testing.TestCase): ) model = models.Sequential([layer]) with self.subTest("test_JSON"): - from keras_core.models.model import model_from_json + from keras.models.model import model_from_json model_json = model.to_json() self.assertIn("Foo>RegisteredSubLayer", model_json) @@ -274,14 
+274,14 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): self.assertAllClose(ref_output, output, atol=1e-5) def DISABLED_test_sequential_model(self): - model = get_sequential_model(keras_core) + model = get_sequential_model(keras) tf_keras_model = get_sequential_model(tf_keras) ref_input = np.random.random((2, 3)) self._check_reloading_model(ref_input, model, tf_keras_model) def DISABLED_test_functional_model(self): tf_keras_model = get_functional_model(tf_keras) - model = get_functional_model(keras_core) + model = get_functional_model(keras) ref_input = np.random.random((2, 3)) self._check_reloading_model(ref_input, model, tf_keras_model) @@ -352,7 +352,7 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): custom_layer = MyDense(1) tf_keras_model = tf_keras.Sequential(layers=[inputs, custom_layer]) - # Re-implement and re-register in Keras Core + # Re-implement and re-register in Keras 3 @object_registration.register_keras_serializable(package="my_package") class MyDense(layers.Dense): def __init__(self, units, **kwargs): @@ -375,7 +375,7 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): outputs = MyDense(1)(inputs) tf_keras_model = tf_keras.Model(inputs, outputs) - # Re-implement and re-register in Keras Core + # Re-implement and re-register in Keras 3 @object_registration.register_keras_serializable(package="my_package") class MyDense(layers.Dense): def __init__(self, units, **kwargs): @@ -434,7 +434,7 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): temp_filepath = os.path.join(self.get_temp_dir(), "model.h5") tf_keras_model.save(temp_filepath) - # Re-implement in Keras Core + # Re-implement in Keras 3 class MyLayer(layers.Layer): def __init__(self, sublayers, **kwargs): super().__init__(**kwargs) @@ -462,13 +462,13 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): ) return cls(**config) - # Re-implement and re-register in Keras Core + # Re-implement and re-register in Keras 3 
@object_registration.register_keras_serializable(package="Foo") class RegisteredSubLayer(layers.Layer): def call(self, x): return x - # Load in Keras Core + # Load in Keras 3 loaded_model = legacy_h5_format.load_model_from_hdf5( temp_filepath, custom_objects={"MyLayer": MyLayer} ) @@ -489,7 +489,7 @@ class LegacyH5BackwardsCompatTest(testing.TestCase): class DirectoryCreationTest(testing.TestCase): def DISABLED_test_directory_creation_on_save(self): """Test if directory is created on model save.""" - model = get_sequential_model(keras_core) + model = get_sequential_model(keras) nested_dirpath = os.path.join( self.get_temp_dir(), "dir1", "dir2", "dir3" ) diff --git a/keras_core/legacy/saving/saving_options.py b/keras/legacy/saving/saving_options.py similarity index 89% rename from keras_core/legacy/saving/saving_options.py rename to keras/legacy/saving/saving_options.py index ced170088..30ba5fb46 100644 --- a/keras_core/legacy/saving/saving_options.py +++ b/keras/legacy/saving/saving_options.py @@ -1,6 +1,6 @@ import contextlib -from keras_core.backend.common import global_state +from keras.backend.common import global_state @contextlib.contextmanager diff --git a/keras_core/legacy/saving/saving_utils.py b/keras/legacy/saving/saving_utils.py similarity index 93% rename from keras_core/legacy/saving/saving_utils.py rename to keras/legacy/saving/saving_utils.py index d9a8dded0..7421111ad 100644 --- a/keras_core/legacy/saving/saving_utils.py +++ b/keras/legacy/saving/saving_utils.py @@ -4,18 +4,18 @@ import threading import tree from absl import logging -from keras_core import backend -from keras_core import layers -from keras_core import losses -from keras_core import metrics as metrics_module -from keras_core import models -from keras_core import optimizers -from keras_core.legacy.saving import serialization -from keras_core.saving import object_registration +from keras import backend +from keras import layers +from keras import losses +from keras import metrics as 
metrics_module +from keras import models +from keras import optimizers +from keras.legacy.saving import serialization +from keras.saving import object_registration MODULE_OBJECTS = threading.local() -# Legacy lambda arguments not found in Keras Core +# Legacy lambda arguments not found in Keras 3 LAMBDA_DEP_ARGS = ( "module", "function_type", @@ -79,8 +79,8 @@ def model_from_config(config, custom_objects=None): config["config"]["function"] = function_dict # TODO(nkovela): Swap find and replace args during Keras 3.0 release - # Replace keras refs with keras_core - config = _find_replace_nested_dict(config, "keras.", "keras_core.") + # Replace keras_core refs with keras + config = _find_replace_nested_dict(config, "keras_core.", "keras.") return serialization.deserialize_keras_object( config, @@ -92,7 +92,7 @@ def model_from_config(config, custom_objects=None): def model_metadata(model, include_optimizer=True, require_config=True): """Returns a dictionary containing the model metadata.""" - from keras_core import __version__ as keras_version + from keras import __version__ as keras_version model_config = {"class_name": model.__class__.__name__} try: @@ -238,7 +238,7 @@ def _resolve_compile_arguments_compat(obj, obj_config, module): This helper function accepts built-in Keras modules such as optimizers, losses, and metrics to ensure an object being deserialized is compatible - with Keras Core built-ins. For legacy H5 files saved within Keras Core, + with Keras 3 built-ins. For legacy H5 files saved within Keras 3, this does nothing. 
""" if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT: diff --git a/keras_core/legacy/saving/serialization.py b/keras/legacy/saving/serialization.py similarity index 97% rename from keras_core/legacy/saving/serialization.py rename to keras/legacy/saving/serialization.py index 2da6f7f93..775ea0770 100644 --- a/keras_core/legacy/saving/serialization.py +++ b/keras/legacy/saving/serialization.py @@ -6,8 +6,8 @@ import threading import weakref # isort: off -from keras_core.api_export import keras_core_export -from keras_core.saving import object_registration +from keras.api_export import keras_export +from keras.saving import object_registration # Flag that determines whether to skip the NotImplementedError when calling # get_config in custom models and layers. This is only enabled when saving to @@ -261,10 +261,10 @@ def skip_failed_serialization(): _SKIP_FAILED_SERIALIZATION = prev -@keras_core_export( +@keras_export( [ - "keras_core.legacy.saving.serialize_keras_object", - "keras_core.utils.legacy.serialize_keras_object", + "keras.legacy.saving.serialize_keras_object", + "keras.utils.legacy.serialize_keras_object", ] ) def serialize_keras_object(instance): @@ -405,10 +405,10 @@ def class_and_config_for_serialized_keras_object( return (cls, cls_config) -@keras_core_export( +@keras_export( [ - "keras_core.legacy.saving.deserialize_keras_object", - "keras_core.utils.legacy.deserialize_keras_object", + "keras.legacy.saving.deserialize_keras_object", + "keras.utils.legacy.deserialize_keras_object", ] ) def deserialize_keras_object( @@ -485,9 +485,9 @@ def deserialize_keras_object( custom_objects = custom_objects or {} # TODO(nkovela): Swap find and replace args during Keras 3.0 release - # Replace keras refs with keras_core + # Replace keras_core refs with keras cls_config = _find_replace_nested_dict( - cls_config, "keras.", "keras_core." + cls_config, "keras_core.", "keras." 
) if "custom_objects" in arg_spec.args: diff --git a/keras_core/losses/__init__.py b/keras/losses/__init__.py similarity index 65% rename from keras_core/losses/__init__.py rename to keras/losses/__init__.py index 6f1c65be4..c04c1fdb1 100644 --- a/keras_core/losses/__init__.py +++ b/keras/losses/__init__.py @@ -1,37 +1,37 @@ -from keras_core.api_export import keras_core_export -from keras_core.losses.loss import Loss -from keras_core.losses.losses import BinaryCrossentropy -from keras_core.losses.losses import CategoricalCrossentropy -from keras_core.losses.losses import CategoricalHinge -from keras_core.losses.losses import CosineSimilarity -from keras_core.losses.losses import Hinge -from keras_core.losses.losses import Huber -from keras_core.losses.losses import KLDivergence -from keras_core.losses.losses import LogCosh -from keras_core.losses.losses import LossFunctionWrapper -from keras_core.losses.losses import MeanAbsoluteError -from keras_core.losses.losses import MeanAbsolutePercentageError -from keras_core.losses.losses import MeanSquaredError -from keras_core.losses.losses import MeanSquaredLogarithmicError -from keras_core.losses.losses import Poisson -from keras_core.losses.losses import SparseCategoricalCrossentropy -from keras_core.losses.losses import SquaredHinge -from keras_core.losses.losses import binary_crossentropy -from keras_core.losses.losses import categorical_crossentropy -from keras_core.losses.losses import categorical_hinge -from keras_core.losses.losses import cosine_similarity -from keras_core.losses.losses import hinge -from keras_core.losses.losses import huber -from keras_core.losses.losses import kl_divergence -from keras_core.losses.losses import log_cosh -from keras_core.losses.losses import mean_absolute_error -from keras_core.losses.losses import mean_absolute_percentage_error -from keras_core.losses.losses import mean_squared_error -from keras_core.losses.losses import mean_squared_logarithmic_error -from 
keras_core.losses.losses import poisson -from keras_core.losses.losses import sparse_categorical_crossentropy -from keras_core.losses.losses import squared_hinge -from keras_core.saving import serialization_lib +from keras.api_export import keras_export +from keras.losses.loss import Loss +from keras.losses.losses import BinaryCrossentropy +from keras.losses.losses import CategoricalCrossentropy +from keras.losses.losses import CategoricalHinge +from keras.losses.losses import CosineSimilarity +from keras.losses.losses import Hinge +from keras.losses.losses import Huber +from keras.losses.losses import KLDivergence +from keras.losses.losses import LogCosh +from keras.losses.losses import LossFunctionWrapper +from keras.losses.losses import MeanAbsoluteError +from keras.losses.losses import MeanAbsolutePercentageError +from keras.losses.losses import MeanSquaredError +from keras.losses.losses import MeanSquaredLogarithmicError +from keras.losses.losses import Poisson +from keras.losses.losses import SparseCategoricalCrossentropy +from keras.losses.losses import SquaredHinge +from keras.losses.losses import binary_crossentropy +from keras.losses.losses import categorical_crossentropy +from keras.losses.losses import categorical_hinge +from keras.losses.losses import cosine_similarity +from keras.losses.losses import hinge +from keras.losses.losses import huber +from keras.losses.losses import kl_divergence +from keras.losses.losses import log_cosh +from keras.losses.losses import mean_absolute_error +from keras.losses.losses import mean_absolute_percentage_error +from keras.losses.losses import mean_squared_error +from keras.losses.losses import mean_squared_logarithmic_error +from keras.losses.losses import poisson +from keras.losses.losses import sparse_categorical_crossentropy +from keras.losses.losses import squared_hinge +from keras.saving import serialization_lib ALL_OBJECTS = { # Base @@ -94,7 +94,7 @@ ALL_OBJECTS_DICT.update( ) 
-@keras_core_export("keras_core.losses.serialize") +@keras_export("keras.losses.serialize") def serialize(loss): """Serializes loss function or `Loss` instance. @@ -107,7 +107,7 @@ def serialize(loss): return serialization_lib.serialize_keras_object(loss) -@keras_core_export("keras_core.losses.deserialize") +@keras_export("keras.losses.deserialize") def deserialize(name, custom_objects=None): """Deserializes a serialized loss class/function instance. @@ -127,7 +127,7 @@ def deserialize(name, custom_objects=None): ) -@keras_core_export("keras_core.losses.get") +@keras_export("keras.losses.get") def get(identifier): """Retrieves a Keras loss as a `function`/`Loss` class instance. diff --git a/keras_core/losses/loss.py b/keras/losses/loss.py similarity index 96% rename from keras_core/losses/loss.py rename to keras/losses/loss.py index d78e21a5a..c8aa0a149 100644 --- a/keras_core/losses/loss.py +++ b/keras/losses/loss.py @@ -1,12 +1,12 @@ import tree -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.utils.naming import auto_name +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.utils.naming import auto_name -@keras_core_export(["keras_core.Loss", "keras_core.losses.Loss"]) +@keras_export(["keras.Loss", "keras.losses.Loss"]) class Loss: """Loss base class. 
diff --git a/keras_core/losses/loss_test.py b/keras/losses/loss_test.py similarity index 97% rename from keras_core/losses/loss_test.py rename to keras/losses/loss_test.py index 77dd939dd..ce45ee3d7 100644 --- a/keras_core/losses/loss_test.py +++ b/keras/losses/loss_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import losses as losses_module -from keras_core import ops -from keras_core import testing -from keras_core.losses.loss import Loss +from keras import backend +from keras import losses as losses_module +from keras import ops +from keras import testing +from keras.losses.loss import Loss class ExampleLoss(Loss): diff --git a/keras_core/losses/losses.py b/keras/losses/losses.py similarity index 88% rename from keras_core/losses/losses.py rename to keras/losses/losses.py index ab36dc7a9..c1b86800a 100644 --- a/keras_core/losses/losses.py +++ b/keras/losses/losses.py @@ -1,12 +1,12 @@ import warnings -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.losses.loss import Loss -from keras_core.losses.loss import squeeze_to_same_rank -from keras_core.saving import serialization_lib -from keras_core.utils.numerical_utils import normalize +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.losses.loss import Loss +from keras.losses.loss import squeeze_to_same_rank +from keras.saving import serialization_lib +from keras.utils.numerical_utils import normalize class LossFunctionWrapper(Loss): @@ -34,7 +34,7 @@ class LossFunctionWrapper(Loss): return cls(**config) -@keras_core_export("keras_core.losses.MeanSquaredError") +@keras_export("keras.losses.MeanSquaredError") class MeanSquaredError(LossFunctionWrapper): """Computes the mean of squares of errors between labels and predictions. 
@@ -60,7 +60,7 @@ class MeanSquaredError(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.MeanAbsoluteError") +@keras_export("keras.losses.MeanAbsoluteError") class MeanAbsoluteError(LossFunctionWrapper): """Computes the mean of absolute difference between labels and predictions. @@ -86,7 +86,7 @@ class MeanAbsoluteError(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.MeanAbsolutePercentageError") +@keras_export("keras.losses.MeanAbsolutePercentageError") class MeanAbsolutePercentageError(LossFunctionWrapper): """Computes the mean absolute percentage error between `y_true` & `y_pred`. @@ -116,7 +116,7 @@ class MeanAbsolutePercentageError(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.MeanSquaredLogarithmicError") +@keras_export("keras.losses.MeanSquaredLogarithmicError") class MeanSquaredLogarithmicError(LossFunctionWrapper): """Computes the mean squared logarithmic error between `y_true` & `y_pred`. @@ -146,7 +146,7 @@ class MeanSquaredLogarithmicError(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.CosineSimilarity") +@keras_export("keras.losses.CosineSimilarity") class CosineSimilarity(LossFunctionWrapper): """Computes the cosine similarity between `y_true` & `y_pred`. @@ -183,7 +183,7 @@ class CosineSimilarity(LossFunctionWrapper): ) -@keras_core_export("keras_core.losses.Huber") +@keras_export("keras.losses.Huber") class Huber(LossFunctionWrapper): """Computes the Huber loss between `y_true` & `y_pred`. @@ -218,7 +218,7 @@ class Huber(LossFunctionWrapper): super().__init__(huber, name=name, reduction=reduction, delta=delta) -@keras_core_export("keras_core.losses.LogCosh") +@keras_export("keras.losses.LogCosh") class LogCosh(LossFunctionWrapper): """Computes the logarithm of the hyperbolic cosine of the prediction error. 
@@ -241,7 +241,7 @@ class LogCosh(LossFunctionWrapper): super().__init__(log_cosh, name=name, reduction=reduction) -@keras_core_export("keras_core.losses.Hinge") +@keras_export("keras.losses.Hinge") class Hinge(LossFunctionWrapper): """Computes the hinge loss between `y_true` & `y_pred`. @@ -268,7 +268,7 @@ class Hinge(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.SquaredHinge") +@keras_export("keras.losses.SquaredHinge") class SquaredHinge(LossFunctionWrapper): """Computes the squared hinge loss between `y_true` & `y_pred`. @@ -295,7 +295,7 @@ class SquaredHinge(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.CategoricalHinge") +@keras_export("keras.losses.CategoricalHinge") class CategoricalHinge(LossFunctionWrapper): """Computes the categorical hinge loss between `y_true` & `y_pred`. @@ -323,7 +323,7 @@ class CategoricalHinge(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.KLDivergence") +@keras_export("keras.losses.KLDivergence") class KLDivergence(LossFunctionWrapper): """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`. @@ -347,7 +347,7 @@ class KLDivergence(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.Poisson") +@keras_export("keras.losses.Poisson") class Poisson(LossFunctionWrapper): """Computes the Poisson loss between `y_true` & `y_pred`. @@ -371,7 +371,7 @@ class Poisson(LossFunctionWrapper): return Loss.get_config(self) -@keras_core_export("keras_core.losses.BinaryCrossentropy") +@keras_export("keras.losses.BinaryCrossentropy") class BinaryCrossentropy(LossFunctionWrapper): """Computes the cross-entropy loss between true labels and predicted labels. @@ -409,7 +409,7 @@ class BinaryCrossentropy(LossFunctionWrapper): ```python model.compile( - loss=keras_core.losses.BinaryCrossentropy(from_logits=True), + loss=keras.losses.BinaryCrossentropy(from_logits=True), ... 
) ``` @@ -419,7 +419,7 @@ class BinaryCrossentropy(LossFunctionWrapper): >>> # Example 1: (batch_size = 1, number of samples = 4) >>> y_true = [0, 1, 0, 0] >>> y_pred = [-18.6, 0.51, 2.94, -12.8] - >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True) + >>> bce = keras.losses.BinaryCrossentropy(from_logits=True) >>> bce(y_true, y_pred) 0.865 @@ -427,19 +427,19 @@ class BinaryCrossentropy(LossFunctionWrapper): >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]] >>> # Using default 'auto'/'sum_over_batch_size' reduction type. - >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True) + >>> bce = keras.losses.BinaryCrossentropy(from_logits=True) >>> bce(y_true, y_pred) 0.865 >>> # Using 'sample_weight' attribute >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]) 0.243 >>> # Using 'sum' reduction` type. - >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True, + >>> bce = keras.losses.BinaryCrossentropy(from_logits=True, ... reduction="sum") >>> bce(y_true, y_pred) 1.730 >>> # Using 'none' reduction type. - >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True, + >>> bce = keras.losses.BinaryCrossentropy(from_logits=True, ... reduction=None) >>> bce(y_true, y_pred) array([0.235, 1.496], dtype=float32) @@ -448,7 +448,7 @@ class BinaryCrossentropy(LossFunctionWrapper): >>> # Make the following updates to the above "Recommended Usage" section >>> # 1. Set `from_logits=False` - >>> keras_core.losses.BinaryCrossentropy() # OR ...('from_logits=False') + >>> keras.losses.BinaryCrossentropy() # OR ...('from_logits=False') >>> # 2. 
Update `y_pred` to use probabilities instead of logits >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]] """ @@ -483,7 +483,7 @@ class BinaryCrossentropy(LossFunctionWrapper): } -@keras_core_export("keras_core.losses.BinaryFocalCrossentropy") +@keras_export("keras.losses.BinaryFocalCrossentropy") class BinaryFocalCrossentropy(LossFunctionWrapper): """Computes focal cross-entropy loss between true labels and predictions. @@ -537,7 +537,7 @@ class BinaryFocalCrossentropy(LossFunctionWrapper): ```python model.compile( - loss=keras_core.losses.BinaryFocalCrossentropy( + loss=keras.losses.BinaryFocalCrossentropy( gamma=2.0, from_logits=True), ... ) @@ -548,13 +548,13 @@ class BinaryFocalCrossentropy(LossFunctionWrapper): >>> # Example 1: (batch_size = 1, number of samples = 4) >>> y_true = [0, 1, 0, 0] >>> y_pred = [-18.6, 0.51, 2.94, -12.8] - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... gamma=2, from_logits=True) >>> loss(y_true, y_pred) 0.691 >>> # Apply class weight - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=2, from_logits=True) >>> loss(y_true, y_pred) 0.51 @@ -563,52 +563,52 @@ class BinaryFocalCrossentropy(LossFunctionWrapper): >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]] >>> # Using default 'auto'/'sum_over_batch_size' reduction type. - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... gamma=3, from_logits=True) >>> loss(y_true, y_pred) 0.647 >>> # Apply class weight - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... 
apply_class_balancing=True, gamma=3, from_logits=True) >>> loss(y_true, y_pred) 0.482 >>> # Using 'sample_weight' attribute with focal effect - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... gamma=3, from_logits=True) >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]) 0.133 >>> # Apply class weight - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=3, from_logits=True) >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]) 0.097 >>> # Using 'sum' reduction` type. - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... gamma=4, from_logits=True, ... reduction="sum") >>> loss(y_true, y_pred) 1.222 >>> # Apply class weight - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=4, from_logits=True, ... reduction="sum") >>> loss(y_true, y_pred) 0.914 >>> # Using 'none' reduction type. - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... gamma=5, from_logits=True, ... reduction=None) >>> loss(y_true, y_pred) array([0.0017 1.1561], dtype=float32) >>> # Apply class weight - >>> loss = keras_core.losses.BinaryFocalCrossentropy( + >>> loss = keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=5, from_logits=True, ... reduction=None) >>> loss(y_true, y_pred) @@ -657,7 +657,7 @@ class BinaryFocalCrossentropy(LossFunctionWrapper): } -@keras_core_export("keras_core.losses.CategoricalCrossentropy") +@keras_export("keras.losses.CategoricalCrossentropy") class CategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. 
@@ -689,7 +689,7 @@ class CategoricalCrossentropy(LossFunctionWrapper): >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. - >>> cce = keras_core.losses.CategoricalCrossentropy() + >>> cce = keras.losses.CategoricalCrossentropy() >>> cce(y_true, y_pred) 1.177 @@ -698,13 +698,13 @@ class CategoricalCrossentropy(LossFunctionWrapper): 0.814 >>> # Using 'sum' reduction type. - >>> cce = keras_core.losses.CategoricalCrossentropy( + >>> cce = keras.losses.CategoricalCrossentropy( ... reduction="sum") >>> cce(y_true, y_pred) 2.354 >>> # Using 'none' reduction type. - >>> cce = keras_core.losses.CategoricalCrossentropy( + >>> cce = keras.losses.CategoricalCrossentropy( ... reduction=None) >>> cce(y_true, y_pred) array([0.0513, 2.303], dtype=float32) @@ -713,7 +713,7 @@ class CategoricalCrossentropy(LossFunctionWrapper): ```python model.compile(optimizer='sgd', - loss=keras_core.losses.CategoricalCrossentropy()) + loss=keras.losses.CategoricalCrossentropy()) ``` """ @@ -747,7 +747,7 @@ class CategoricalCrossentropy(LossFunctionWrapper): } -@keras_core_export("keras_core.losses.CategoricalFocalCrossentropy") +@keras_export("keras.losses.CategoricalFocalCrossentropy") class CategoricalFocalCrossentropy(LossFunctionWrapper): """Computes the alpha balanced focal crossentropy loss. @@ -820,7 +820,7 @@ class CategoricalFocalCrossentropy(LossFunctionWrapper): >>> y_true = [[0., 1., 0.], [0., 0., 1.]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. - >>> cce = keras_core.losses.CategoricalFocalCrossentropy() + >>> cce = keras.losses.CategoricalFocalCrossentropy() >>> cce(y_true, y_pred) 0.23315276 @@ -829,13 +829,13 @@ class CategoricalFocalCrossentropy(LossFunctionWrapper): 0.1632 >>> # Using 'sum' reduction type. - >>> cce = keras_core.losses.CategoricalFocalCrossentropy( + >>> cce = keras.losses.CategoricalFocalCrossentropy( ... 
reduction="sum") >>> cce(y_true, y_pred) 0.46631 >>> # Using 'none' reduction type. - >>> cce = keras_core.losses.CategoricalFocalCrossentropy( + >>> cce = keras.losses.CategoricalFocalCrossentropy( ... reduction=None) >>> cce(y_true, y_pred) array([3.2058331e-05, 4.6627346e-01], dtype=float32) @@ -844,7 +844,7 @@ class CategoricalFocalCrossentropy(LossFunctionWrapper): ```python model.compile(optimizer='adam', - loss=keras_core.losses.CategoricalFocalCrossentropy()) + loss=keras.losses.CategoricalFocalCrossentropy()) ``` """ @@ -887,7 +887,7 @@ class CategoricalFocalCrossentropy(LossFunctionWrapper): } -@keras_core_export("keras_core.losses.SparseCategoricalCrossentropy") +@keras_export("keras.losses.SparseCategoricalCrossentropy") class SparseCategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. @@ -916,7 +916,7 @@ class SparseCategoricalCrossentropy(LossFunctionWrapper): >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. - >>> scce = keras_core.losses.SparseCategoricalCrossentropy() + >>> scce = keras.losses.SparseCategoricalCrossentropy() >>> scce(y_true, y_pred) 1.177 @@ -925,13 +925,13 @@ class SparseCategoricalCrossentropy(LossFunctionWrapper): 0.814 >>> # Using 'sum' reduction type. - >>> scce = keras_core.losses.SparseCategoricalCrossentropy( + >>> scce = keras.losses.SparseCategoricalCrossentropy( ... reduction="sum") >>> scce(y_true, y_pred) 2.354 >>> # Using 'none' reduction type. - >>> scce = keras_core.losses.SparseCategoricalCrossentropy( + >>> scce = keras.losses.SparseCategoricalCrossentropy( ... 
reduction=None) >>> scce(y_true, y_pred) array([0.0513, 2.303], dtype=float32) @@ -940,7 +940,7 @@ class SparseCategoricalCrossentropy(LossFunctionWrapper): ```python model.compile(optimizer='sgd', - loss=keras_core.losses.SparseCategoricalCrossentropy()) + loss=keras.losses.SparseCategoricalCrossentropy()) ``` """ @@ -990,10 +990,10 @@ def convert_binary_labels_to_hinge(y_true): return updated_y_true -@keras_core_export( +@keras_export( [ - "keras_core.metrics.hinge", - "keras_core.losses.hinge", + "keras.metrics.hinge", + "keras.losses.hinge", ] ) def hinge(y_true, y_pred): @@ -1018,7 +1018,7 @@ def hinge(y_true, y_pred): >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.hinge(y_true, y_pred) + >>> loss = keras.losses.hinge(y_true, y_pred) """ y_pred = ops.convert_to_tensor(y_pred) y_true = ops.cast(y_true, dtype=y_pred.dtype) @@ -1027,10 +1027,10 @@ def hinge(y_true, y_pred): return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.squared_hinge", - "keras_core.losses.squared_hinge", + "keras.metrics.squared_hinge", + "keras.losses.squared_hinge", ] ) def squared_hinge(y_true, y_pred): @@ -1055,7 +1055,7 @@ def squared_hinge(y_true, y_pred): >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.squared_hinge(y_true, y_pred) + >>> loss = keras.losses.squared_hinge(y_true, y_pred) """ y_pred = ops.convert_to_tensor(y_pred) y_true = ops.cast(y_true, y_pred.dtype) @@ -1065,10 +1065,10 @@ def squared_hinge(y_true, y_pred): ) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.categorical_hinge", - "keras_core.losses.categorical_hinge", + "keras.metrics.categorical_hinge", + "keras.losses.categorical_hinge", ] ) def categorical_hinge(y_true, y_pred): @@ -1096,7 +1096,7 @@ def categorical_hinge(y_true, y_pred): >>> y_true = np.random.randint(0, 
3, size=(2,)) >>> y_true = np.eye(np.max(y_true) + 1)[y_true] >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.categorical_hinge(y_true, y_pred) + >>> loss = keras.losses.categorical_hinge(y_true, y_pred) """ y_pred = ops.convert_to_tensor(y_pred) y_true = ops.cast(y_true, y_pred.dtype) @@ -1106,15 +1106,15 @@ def categorical_hinge(y_true, y_pred): return ops.maximum(neg - pos + 1.0, zero) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.mean_squared_error", - "keras_core.losses.mean_squared_error", + "keras.metrics.mean_squared_error", + "keras.losses.mean_squared_error", # Legacy aliases - "keras_core._legacy.losses.mse", - "keras_core._legacy.losses.MSE", - "keras_core._legacy.metrics.mse", - "keras_core._legacy.metrics.MSE", + "keras._legacy.losses.mse", + "keras._legacy.losses.MSE", + "keras._legacy.metrics.mse", + "keras._legacy.metrics.MSE", ] ) def mean_squared_error(y_true, y_pred): @@ -1130,7 +1130,7 @@ def mean_squared_error(y_true, y_pred): >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.mean_squared_error(y_true, y_pred) + >>> loss = keras.losses.mean_squared_error(y_true, y_pred) Args: y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. 
@@ -1145,15 +1145,15 @@ def mean_squared_error(y_true, y_pred): return ops.mean(ops.square(y_true - y_pred), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.mean_absolute_error", - "keras_core.losses.mean_absolute_error", + "keras.metrics.mean_absolute_error", + "keras.losses.mean_absolute_error", # Legacy aliases - "keras_core._legacy.losses.MAE", - "keras_core._legacy.losses.mae", - "keras_core._legacy.metrics.MAE", - "keras_core._legacy.metrics.mae", + "keras._legacy.losses.MAE", + "keras._legacy.losses.mae", + "keras._legacy.metrics.MAE", + "keras._legacy.metrics.mae", ] ) def mean_absolute_error(y_true, y_pred): @@ -1174,7 +1174,7 @@ def mean_absolute_error(y_true, y_pred): >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.mean_absolute_error(y_true, y_pred) + >>> loss = keras.losses.mean_absolute_error(y_true, y_pred) """ y_pred = ops.convert_to_tensor(y_pred) y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) @@ -1182,15 +1182,15 @@ def mean_absolute_error(y_true, y_pred): return ops.mean(ops.abs(y_true - y_pred), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.mean_absolute_percentage_error", - "keras_core.losses.mean_absolute_percentage_error", + "keras.metrics.mean_absolute_percentage_error", + "keras.losses.mean_absolute_percentage_error", # Legacy aliases - "keras_core._legacy.losses.mape", - "keras_core._legacy.losses.MAPE", - "keras_core._legacy.metrics.mape", - "keras_core._legacy.metrics.MAPE", + "keras._legacy.losses.mape", + "keras._legacy.losses.MAPE", + "keras._legacy.metrics.mape", + "keras._legacy.metrics.MAPE", ] ) def mean_absolute_percentage_error(y_true, y_pred): @@ -1203,7 +1203,7 @@ def mean_absolute_percentage_error(y_true, y_pred): ``` Division by zero is prevented by dividing by `maximum(y_true, epsilon)` - where `epsilon = keras_core.backend.epsilon()` + where `epsilon = keras.backend.epsilon()` (default to 
`1e-7`). Args: @@ -1218,7 +1218,7 @@ def mean_absolute_percentage_error(y_true, y_pred): >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.mean_absolute_percentage_error(y_true, y_pred) + >>> loss = keras.losses.mean_absolute_percentage_error(y_true, y_pred) """ y_pred = ops.convert_to_tensor(y_pred) y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) @@ -1228,15 +1228,15 @@ def mean_absolute_percentage_error(y_true, y_pred): return 100.0 * ops.mean(diff, axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.mean_squared_logarithmic_error", - "keras_core.losses.mean_squared_logarithmic_error", + "keras.metrics.mean_squared_logarithmic_error", + "keras.losses.mean_squared_logarithmic_error", # Legacy aliases - "keras_core._legacy.losses.msle", - "keras_core._legacy.losses.MSLE", - "keras_core._legacy.metrics.msle", - "keras_core._legacy.metrics.MSLE", + "keras._legacy.losses.msle", + "keras._legacy.losses.MSLE", + "keras._legacy.metrics.msle", + "keras._legacy.metrics.MSLE", ] ) def mean_squared_logarithmic_error(y_true, y_pred): @@ -1249,7 +1249,7 @@ def mean_squared_logarithmic_error(y_true, y_pred): ``` Note that `y_pred` and `y_true` cannot be less or equal to 0. Negative - values and 0 values will be replaced with `keras_core.backend.epsilon()` + values and 0 values will be replaced with `keras.backend.epsilon()` (default to `1e-7`). 
Args: @@ -1264,7 +1264,7 @@ def mean_squared_logarithmic_error(y_true, y_pred): >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.mean_squared_logarithmic_error(y_true, y_pred) + >>> loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred) """ epsilon = ops.convert_to_tensor(backend.epsilon()) y_pred = ops.convert_to_tensor(y_pred) @@ -1275,7 +1275,7 @@ def mean_squared_logarithmic_error(y_true, y_pred): return ops.mean(ops.square(first_log - second_log), axis=-1) -@keras_core_export("keras_core.losses.cosine_similarity") +@keras_export("keras.losses.cosine_similarity") def cosine_similarity(y_true, y_pred, axis=-1): """Computes the cosine similarity between labels and predictions. @@ -1304,7 +1304,7 @@ def cosine_similarity(y_true, y_pred, axis=-1): >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] - >>> loss = keras_core.losses.cosine_similarity(y_true, y_pred, axis=-1) + >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1) [-0., -0.99999994, 0.99999994] """ y_pred = ops.convert_to_tensor(y_pred) @@ -1315,7 +1315,7 @@ def cosine_similarity(y_true, y_pred, axis=-1): return -ops.sum(y_true * y_pred, axis=axis) -@keras_core_export(["keras_core.losses.huber", "keras_core.metrics.huber"]) +@keras_export(["keras.losses.huber", "keras.metrics.huber"]) def huber(y_true, y_pred, delta=1.0): """Computes Huber loss value. 
@@ -1335,7 +1335,7 @@ def huber(y_true, y_pred, delta=1.0): >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] - >>> loss = keras_core.losses.huber(y_true, y_pred) + >>> loss = keras.losses.huber(y_true, y_pred) 0.155 @@ -1365,13 +1365,13 @@ def huber(y_true, y_pred, delta=1.0): ) -@keras_core_export( +@keras_export( [ - "keras_core.losses.log_cosh", - "keras_core.metrics.log_cosh", + "keras.losses.log_cosh", + "keras.metrics.log_cosh", # Legacy aliases - "keras_core._legacy.losses.logcosh", - "keras_core._legacy.metrics.logcosh", + "keras._legacy.losses.logcosh", + "keras._legacy.metrics.logcosh", ] ) def log_cosh(y_true, y_pred): @@ -1391,7 +1391,7 @@ def log_cosh(y_true, y_pred): >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] - >>> loss = keras_core.losses.log_cosh(y_true, y_pred) + >>> loss = keras.losses.log_cosh(y_true, y_pred) 0.108 Args: @@ -1412,17 +1412,17 @@ def log_cosh(y_true, y_pred): return ops.mean(_logcosh(y_pred - y_true), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.kl_divergence", - "keras_core.losses.kl_divergence", + "keras.metrics.kl_divergence", + "keras.losses.kl_divergence", # Legacy aliases - "keras_core._legacy.losses.KLD", - "keras_core._legacy.losses.kld", - "keras_core._legacy.losses.kullback_leibler_divergence", - "keras_core._legacy.metrics.KLD", - "keras_core._legacy.metrics.kld", - "keras_core._legacy.metrics.kullback_leibler_divergence", + "keras._legacy.losses.KLD", + "keras._legacy.losses.kld", + "keras._legacy.losses.kullback_leibler_divergence", + "keras._legacy.metrics.KLD", + "keras._legacy.metrics.kld", + "keras._legacy.metrics.kullback_leibler_divergence", ] ) def kl_divergence(y_true, y_pred): @@ -1445,7 +1445,7 @@ def kl_divergence(y_true, y_pred): >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.kl_divergence(y_true, y_pred) + >>> loss = 
keras.losses.kl_divergence(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = ops.clip(y_true, 1e-7, 1) >>> y_pred = ops.clip(y_pred, 1e-7, 1) @@ -1459,10 +1459,10 @@ def kl_divergence(y_true, y_pred): return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.poisson", - "keras_core.losses.poisson", + "keras.metrics.poisson", + "keras.losses.poisson", ] ) def poisson(y_true, y_pred): @@ -1485,7 +1485,7 @@ def poisson(y_true, y_pred): >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) - >>> loss = keras_core.losses.poisson(y_true, y_pred) + >>> loss = keras.losses.poisson(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_pred = y_pred + 1e-7 >>> assert np.allclose( @@ -1498,10 +1498,10 @@ def poisson(y_true, y_pred): return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.categorical_crossentropy", - "keras_core.losses.categorical_crossentropy", + "keras.metrics.categorical_crossentropy", + "keras.losses.categorical_crossentropy", ] ) def categorical_crossentropy( @@ -1527,7 +1527,7 @@ def categorical_crossentropy( >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] - >>> loss = keras_core.losses.categorical_crossentropy(y_true, y_pred) + >>> loss = keras.losses.categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss array([0.0513, 2.303], dtype=float32) @@ -1561,10 +1561,10 @@ def categorical_crossentropy( ) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.categorical_focal_crossentropy", - "keras_core.losses.categorical_focal_crossentropy", + "keras.metrics.categorical_focal_crossentropy", + "keras.losses.categorical_focal_crossentropy", ] ) def categorical_focal_crossentropy( @@ -1605,7 +1605,7 @@ def categorical_focal_crossentropy( >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 
0.85, 0.05]] - >>> loss = keras_core.losses.categorical_focal_crossentropy(y_true, y_pred) + >>> loss = keras.losses.categorical_focal_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss array([2.63401289e-04, 6.75912094e-01], dtype=float32) @@ -1657,10 +1657,10 @@ def categorical_focal_crossentropy( return focal_cce -@keras_core_export( +@keras_export( [ - "keras_core.metrics.sparse_categorical_crossentropy", - "keras_core.losses.sparse_categorical_crossentropy", + "keras.metrics.sparse_categorical_crossentropy", + "keras.losses.sparse_categorical_crossentropy", ] ) def sparse_categorical_crossentropy( @@ -1688,7 +1688,7 @@ def sparse_categorical_crossentropy( >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] - >>> loss = keras_core.losses.sparse_categorical_crossentropy(y_true, y_pred) + >>> loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss array([0.0513, 2.303], dtype=float32) @@ -1721,10 +1721,10 @@ def sparse_categorical_crossentropy( return res -@keras_core_export( +@keras_export( [ - "keras_core.metrics.binary_crossentropy", - "keras_core.losses.binary_crossentropy", + "keras.metrics.binary_crossentropy", + "keras.losses.binary_crossentropy", ] ) def binary_crossentropy( @@ -1750,7 +1750,7 @@ def binary_crossentropy( >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] - >>> loss = keras_core.losses.binary_crossentropy(y_true, y_pred) + >>> loss = keras.losses.binary_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss array([0.916 , 0.714], dtype=float32) @@ -1767,10 +1767,10 @@ def binary_crossentropy( ) -@keras_core_export( +@keras_export( [ - "keras_core.metrics.binary_focal_crossentropy", - "keras_core.losses.binary_focal_crossentropy", + "keras.metrics.binary_focal_crossentropy", + "keras.losses.binary_focal_crossentropy", ] ) def binary_focal_crossentropy( @@ -1826,7 +1826,7 @@ def binary_focal_crossentropy( >>> y_true = [[0, 1], 
[0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] - >>> loss = keras_core.losses.binary_focal_crossentropy( + >>> loss = keras.losses.binary_focal_crossentropy( ... y_true, y_pred, gamma=2) >>> assert loss.shape == (2,) >>> loss diff --git a/keras_core/losses/losses_test.py b/keras/losses/losses_test.py similarity index 99% rename from keras_core/losses/losses_test.py rename to keras/losses/losses_test.py index 6568e4d6d..7942a0994 100644 --- a/keras_core/losses/losses_test.py +++ b/keras/losses/losses_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.losses import losses +from keras import testing +from keras.losses import losses class MeanSquaredErrorTest(testing.TestCase): diff --git a/keras_core/metrics/__init__.py b/keras/metrics/__init__.py similarity index 57% rename from keras_core/metrics/__init__.py rename to keras/metrics/__init__.py index 3ec9fae23..fdd1d1fd1 100644 --- a/keras_core/metrics/__init__.py +++ b/keras/metrics/__init__.py @@ -1,52 +1,52 @@ -from keras_core.api_export import keras_core_export -from keras_core.metrics.accuracy_metrics import Accuracy -from keras_core.metrics.accuracy_metrics import BinaryAccuracy -from keras_core.metrics.accuracy_metrics import CategoricalAccuracy -from keras_core.metrics.accuracy_metrics import SparseCategoricalAccuracy -from keras_core.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy -from keras_core.metrics.accuracy_metrics import TopKCategoricalAccuracy -from keras_core.metrics.confusion_metrics import AUC -from keras_core.metrics.confusion_metrics import FalseNegatives -from keras_core.metrics.confusion_metrics import FalsePositives -from keras_core.metrics.confusion_metrics import Precision -from keras_core.metrics.confusion_metrics import PrecisionAtRecall -from keras_core.metrics.confusion_metrics import Recall -from keras_core.metrics.confusion_metrics import RecallAtPrecision -from keras_core.metrics.confusion_metrics import SensitivityAtSpecificity 
-from keras_core.metrics.confusion_metrics import SpecificityAtSensitivity -from keras_core.metrics.confusion_metrics import TrueNegatives -from keras_core.metrics.confusion_metrics import TruePositives -from keras_core.metrics.f_score_metrics import F1Score -from keras_core.metrics.f_score_metrics import FBetaScore -from keras_core.metrics.hinge_metrics import CategoricalHinge -from keras_core.metrics.hinge_metrics import Hinge -from keras_core.metrics.hinge_metrics import SquaredHinge -from keras_core.metrics.iou_metrics import BinaryIoU -from keras_core.metrics.iou_metrics import IoU -from keras_core.metrics.iou_metrics import MeanIoU -from keras_core.metrics.iou_metrics import OneHotIoU -from keras_core.metrics.iou_metrics import OneHotMeanIoU -from keras_core.metrics.metric import Metric -from keras_core.metrics.probabilistic_metrics import BinaryCrossentropy -from keras_core.metrics.probabilistic_metrics import CategoricalCrossentropy -from keras_core.metrics.probabilistic_metrics import KLDivergence -from keras_core.metrics.probabilistic_metrics import Poisson -from keras_core.metrics.probabilistic_metrics import ( +from keras.api_export import keras_export +from keras.metrics.accuracy_metrics import Accuracy +from keras.metrics.accuracy_metrics import BinaryAccuracy +from keras.metrics.accuracy_metrics import CategoricalAccuracy +from keras.metrics.accuracy_metrics import SparseCategoricalAccuracy +from keras.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy +from keras.metrics.accuracy_metrics import TopKCategoricalAccuracy +from keras.metrics.confusion_metrics import AUC +from keras.metrics.confusion_metrics import FalseNegatives +from keras.metrics.confusion_metrics import FalsePositives +from keras.metrics.confusion_metrics import Precision +from keras.metrics.confusion_metrics import PrecisionAtRecall +from keras.metrics.confusion_metrics import Recall +from keras.metrics.confusion_metrics import RecallAtPrecision +from 
keras.metrics.confusion_metrics import SensitivityAtSpecificity +from keras.metrics.confusion_metrics import SpecificityAtSensitivity +from keras.metrics.confusion_metrics import TrueNegatives +from keras.metrics.confusion_metrics import TruePositives +from keras.metrics.f_score_metrics import F1Score +from keras.metrics.f_score_metrics import FBetaScore +from keras.metrics.hinge_metrics import CategoricalHinge +from keras.metrics.hinge_metrics import Hinge +from keras.metrics.hinge_metrics import SquaredHinge +from keras.metrics.iou_metrics import BinaryIoU +from keras.metrics.iou_metrics import IoU +from keras.metrics.iou_metrics import MeanIoU +from keras.metrics.iou_metrics import OneHotIoU +from keras.metrics.iou_metrics import OneHotMeanIoU +from keras.metrics.metric import Metric +from keras.metrics.probabilistic_metrics import BinaryCrossentropy +from keras.metrics.probabilistic_metrics import CategoricalCrossentropy +from keras.metrics.probabilistic_metrics import KLDivergence +from keras.metrics.probabilistic_metrics import Poisson +from keras.metrics.probabilistic_metrics import ( SparseCategoricalCrossentropy, ) -from keras_core.metrics.reduction_metrics import Mean -from keras_core.metrics.reduction_metrics import MeanMetricWrapper -from keras_core.metrics.reduction_metrics import Sum -from keras_core.metrics.regression_metrics import CosineSimilarity -from keras_core.metrics.regression_metrics import LogCoshError -from keras_core.metrics.regression_metrics import MeanAbsoluteError -from keras_core.metrics.regression_metrics import MeanAbsolutePercentageError -from keras_core.metrics.regression_metrics import MeanSquaredError -from keras_core.metrics.regression_metrics import MeanSquaredLogarithmicError -from keras_core.metrics.regression_metrics import R2Score -from keras_core.metrics.regression_metrics import RootMeanSquaredError -from keras_core.saving import serialization_lib -from keras_core.utils.naming import to_snake_case +from 
keras.metrics.reduction_metrics import Mean +from keras.metrics.reduction_metrics import MeanMetricWrapper +from keras.metrics.reduction_metrics import Sum +from keras.metrics.regression_metrics import CosineSimilarity +from keras.metrics.regression_metrics import LogCoshError +from keras.metrics.regression_metrics import MeanAbsoluteError +from keras.metrics.regression_metrics import MeanAbsolutePercentageError +from keras.metrics.regression_metrics import MeanSquaredError +from keras.metrics.regression_metrics import MeanSquaredLogarithmicError +from keras.metrics.regression_metrics import R2Score +from keras.metrics.regression_metrics import RootMeanSquaredError +from keras.saving import serialization_lib +from keras.utils.naming import to_snake_case ALL_OBJECTS = { # Base @@ -124,7 +124,7 @@ ALL_OBJECTS_DICT.update( ) -@keras_core_export("keras_core.metrics.serialize") +@keras_export("keras.metrics.serialize") def serialize(metric): """Serializes metric function or `Metric` instance. @@ -137,7 +137,7 @@ def serialize(metric): return serialization_lib.serialize_keras_object(metric) -@keras_core_export("keras_core.metrics.deserialize") +@keras_export("keras.metrics.deserialize") def deserialize(config, custom_objects=None): """Deserializes a serialized metric class/function instance. @@ -157,7 +157,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.metrics.get") +@keras_export("keras.metrics.get") def get(identifier): """Retrieves a Keras metric as a `function`/`Metric` class instance. 
diff --git a/keras_core/metrics/accuracy_metrics.py b/keras/metrics/accuracy_metrics.py similarity index 88% rename from keras_core/metrics/accuracy_metrics.py rename to keras/metrics/accuracy_metrics.py index 86ae37b1f..ee73e0ac0 100644 --- a/keras_core/metrics/accuracy_metrics.py +++ b/keras/metrics/accuracy_metrics.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.losses.loss import squeeze_to_same_rank -from keras_core.metrics import reduction_metrics +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.losses.loss import squeeze_to_same_rank +from keras.metrics import reduction_metrics def accuracy(y_true, y_pred): @@ -15,7 +15,7 @@ def accuracy(y_true, y_pred): ) -@keras_core_export("keras_core.metrics.Accuracy") +@keras_export("keras.metrics.Accuracy") class Accuracy(reduction_metrics.MeanMetricWrapper): """Calculates how often predictions equal labels. 
@@ -33,7 +33,7 @@ class Accuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.Accuracy() + >>> m = keras.metrics.Accuracy() >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]]) >>> m.result() 0.75 @@ -49,7 +49,7 @@ class Accuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='binary_crossentropy', - metrics=[keras_core.metrics.Accuracy()]) + metrics=[keras.metrics.Accuracy()]) ``` """ @@ -60,7 +60,7 @@ class Accuracy(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.binary_accuracy") +@keras_export("keras.metrics.binary_accuracy") def binary_accuracy(y_true, y_pred, threshold=0.5): y_true = ops.convert_to_tensor(y_true) y_pred = ops.convert_to_tensor(y_pred) @@ -73,7 +73,7 @@ def binary_accuracy(y_true, y_pred, threshold=0.5): ) -@keras_core_export("keras_core.metrics.BinaryAccuracy") +@keras_export("keras.metrics.BinaryAccuracy") class BinaryAccuracy(reduction_metrics.MeanMetricWrapper): """Calculates how often predictions match binary labels. 
@@ -93,7 +93,7 @@ class BinaryAccuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.BinaryAccuracy() + >>> m = keras.metrics.BinaryAccuracy() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]]) >>> m.result() 0.75 @@ -109,7 +109,7 @@ class BinaryAccuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='binary_crossentropy', - metrics=[keras_core.metrics.BinaryAccuracy()]) + metrics=[keras.metrics.BinaryAccuracy()]) ``` """ @@ -120,7 +120,7 @@ class BinaryAccuracy(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.categorical_accuracy") +@keras_export("keras.metrics.categorical_accuracy") def categorical_accuracy(y_true, y_pred): y_true = ops.argmax(y_true, axis=-1) @@ -152,7 +152,7 @@ def categorical_accuracy(y_true, y_pred): return matches -@keras_core_export("keras_core.metrics.CategoricalAccuracy") +@keras_export("keras.metrics.CategoricalAccuracy") class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper): """Calculates how often predictions match one-hot labels. @@ -177,7 +177,7 @@ class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.CategoricalAccuracy() + >>> m = keras.metrics.CategoricalAccuracy() >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], ... 
[0.05, 0.95, 0]]) >>> m.result() @@ -195,7 +195,7 @@ class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='categorical_crossentropy', - metrics=[keras_core.metrics.CategoricalAccuracy()]) + metrics=[keras.metrics.CategoricalAccuracy()]) ``` """ @@ -206,7 +206,7 @@ class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.sparse_categorical_accuracy") +@keras_export("keras.metrics.sparse_categorical_accuracy") def sparse_categorical_accuracy(y_true, y_pred): reshape_matches = False y_pred = ops.convert_to_tensor(y_pred) @@ -238,7 +238,7 @@ def sparse_categorical_accuracy(y_true, y_pred): return matches -@keras_core_export("keras_core.metrics.SparseCategoricalAccuracy") +@keras_export("keras.metrics.SparseCategoricalAccuracy") class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): """Calculates how often predictions match integer labels. 
@@ -263,7 +263,7 @@ class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.SparseCategoricalAccuracy() + >>> m = keras.metrics.SparseCategoricalAccuracy() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) >>> m.result() 0.5 @@ -279,7 +279,7 @@ class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', - metrics=[keras_core.metrics.SparseCategoricalAccuracy()]) + metrics=[keras.metrics.SparseCategoricalAccuracy()]) ``` """ @@ -290,7 +290,7 @@ class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.top_k_categorical_accuracy") +@keras_export("keras.metrics.top_k_categorical_accuracy") def top_k_categorical_accuracy(y_true, y_pred, k=5): reshape_matches = False y_pred = ops.convert_to_tensor(y_pred) @@ -320,7 +320,7 @@ def top_k_categorical_accuracy(y_true, y_pred, k=5): return matches -@keras_core_export("keras_core.metrics.TopKCategoricalAccuracy") +@keras_export("keras.metrics.TopKCategoricalAccuracy") class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): """Computes how often targets are in the top `K` predictions. @@ -332,7 +332,7 @@ class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.TopKCategoricalAccuracy(k=1) + >>> m = keras.metrics.TopKCategoricalAccuracy(k=1) >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... 
[[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result() @@ -350,7 +350,7 @@ class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='categorical_crossentropy', - metrics=[keras_core.metrics.TopKCategoricalAccuracy()]) + metrics=[keras.metrics.TopKCategoricalAccuracy()]) ``` """ @@ -367,7 +367,7 @@ class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype, "k": self.k} -@keras_core_export("keras_core.metrics.sparse_top_k_categorical_accuracy") +@keras_export("keras.metrics.sparse_top_k_categorical_accuracy") def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5): reshape_matches = False y_pred = ops.convert_to_tensor(y_pred) @@ -396,7 +396,7 @@ def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5): return matches -@keras_core_export("keras_core.metrics.SparseTopKCategoricalAccuracy") +@keras_export("keras.metrics.SparseTopKCategoricalAccuracy") class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): """Computes how often integer targets are in the top `K` predictions. 
@@ -408,7 +408,7 @@ class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.SparseTopKCategoricalAccuracy(k=1) + >>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1) >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result() 0.5 @@ -424,7 +424,7 @@ class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', - metrics=[keras_core.metrics.SparseTopKCategoricalAccuracy()]) + metrics=[keras.metrics.SparseTopKCategoricalAccuracy()]) ``` """ diff --git a/keras_core/metrics/accuracy_metrics_test.py b/keras/metrics/accuracy_metrics_test.py similarity index 99% rename from keras_core/metrics/accuracy_metrics_test.py rename to keras/metrics/accuracy_metrics_test.py index 38aad0af6..e56ba02ce 100644 --- a/keras_core/metrics/accuracy_metrics_test.py +++ b/keras/metrics/accuracy_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.metrics import accuracy_metrics +from keras import testing +from keras.metrics import accuracy_metrics class AccuracyTest(testing.TestCase): diff --git a/keras_core/metrics/confusion_metrics.py b/keras/metrics/confusion_metrics.py similarity index 95% rename from keras_core/metrics/confusion_metrics.py rename to keras/metrics/confusion_metrics.py index b78747bd1..d11490d6a 100644 --- a/keras_core/metrics/confusion_metrics.py +++ b/keras/metrics/confusion_metrics.py @@ -1,13 +1,13 @@ import numpy as np -from keras_core import activations -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.metrics import metrics_utils -from keras_core.metrics.metric import Metric -from keras_core.utils.python_utils import to_list +from keras import activations +from keras import backend +from keras import initializers +from keras 
import ops +from keras.api_export import keras_export +from keras.metrics import metrics_utils +from keras.metrics.metric import Metric +from keras.utils.python_utils import to_list class _ConfusionMatrixConditionCount(Metric): @@ -75,7 +75,7 @@ class _ConfusionMatrixConditionCount(Metric): return {**base_config, **config} -@keras_core_export("keras_core.metrics.FalsePositives") +@keras_export("keras.metrics.FalsePositives") class FalsePositives(_ConfusionMatrixConditionCount): """Calculates the number of false positives. @@ -99,7 +99,7 @@ class FalsePositives(_ConfusionMatrixConditionCount): Standalone usage: - >>> m = keras_core.metrics.FalsePositives() + >>> m = keras.metrics.FalsePositives() >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1]) >>> m.result() 2.0 @@ -119,7 +119,7 @@ class FalsePositives(_ConfusionMatrixConditionCount): ) -@keras_core_export("keras_core.metrics.FalseNegatives") +@keras_export("keras.metrics.FalseNegatives") class FalseNegatives(_ConfusionMatrixConditionCount): """Calculates the number of false negatives. @@ -143,7 +143,7 @@ class FalseNegatives(_ConfusionMatrixConditionCount): Standalone usage: - >>> m = keras_core.metrics.FalseNegatives() + >>> m = keras.metrics.FalseNegatives() >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0]) >>> m.result() 2.0 @@ -163,7 +163,7 @@ class FalseNegatives(_ConfusionMatrixConditionCount): ) -@keras_core_export("keras_core.metrics.TrueNegatives") +@keras_export("keras.metrics.TrueNegatives") class TrueNegatives(_ConfusionMatrixConditionCount): """Calculates the number of true negatives. 
@@ -187,7 +187,7 @@ class TrueNegatives(_ConfusionMatrixConditionCount): Standalone usage: - >>> m = keras_core.metrics.TrueNegatives() + >>> m = keras.metrics.TrueNegatives() >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0]) >>> m.result() 2.0 @@ -207,7 +207,7 @@ class TrueNegatives(_ConfusionMatrixConditionCount): ) -@keras_core_export("keras_core.metrics.TruePositives") +@keras_export("keras.metrics.TruePositives") class TruePositives(_ConfusionMatrixConditionCount): """Calculates the number of true positives. @@ -231,7 +231,7 @@ class TruePositives(_ConfusionMatrixConditionCount): Standalone usage: - >>> m = keras_core.metrics.TruePositives() + >>> m = keras.metrics.TruePositives() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result() 2.0 @@ -251,7 +251,7 @@ class TruePositives(_ConfusionMatrixConditionCount): ) -@keras_core_export("keras_core.metrics.Precision") +@keras_export("keras.metrics.Precision") class Precision(Metric): """Computes the precision of the predictions with respect to the labels. 
@@ -293,7 +293,7 @@ class Precision(Metric): Standalone usage: - >>> m = keras_core.metrics.Precision() + >>> m = keras.metrics.Precision() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result() 0.6666667 @@ -305,14 +305,14 @@ class Precision(Metric): >>> # With top_k=2, it will calculate precision over y_true[:2] >>> # and y_pred[:2] - >>> m = keras_core.metrics.Precision(top_k=2) + >>> m = keras.metrics.Precision(top_k=2) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result() 0.0 >>> # With top_k=4, it will calculate precision over y_true[:4] >>> # and y_pred[:4] - >>> m = keras_core.metrics.Precision(top_k=4) + >>> m = keras.metrics.Precision(top_k=4) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result() 0.5 @@ -322,15 +322,15 @@ class Precision(Metric): ```python model.compile(optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.Precision()]) + metrics=[keras.metrics.Precision()]) ``` Usage with a loss with `from_logits=True`: ```python model.compile(optimizer='adam', - loss=keras_core.losses.BinaryCrossentropy(from_logits=True), - metrics=[keras_core.metrics.Precision(thresholds=0)]) + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.Precision(thresholds=0)]) ``` """ @@ -408,7 +408,7 @@ class Precision(Metric): return {**base_config, **config} -@keras_core_export("keras_core.metrics.Recall") +@keras_export("keras.metrics.Recall") class Recall(Metric): """Computes the recall of the predictions with respect to the labels. 
@@ -448,7 +448,7 @@ class Recall(Metric): Standalone usage: - >>> m = keras_core.metrics.Recall() + >>> m = keras.metrics.Recall() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result() 0.6666667 @@ -463,15 +463,15 @@ class Recall(Metric): ```python model.compile(optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.Recall()]) + metrics=[keras.metrics.Recall()]) ``` Usage with a loss with `from_logits=True`: ```python model.compile(optimizer='adam', - loss=keras_core.losses.BinaryCrossentropy(from_logits=True), - metrics=[keras_core.metrics.Recall(thresholds=0)]) + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.Recall(thresholds=0)]) ``` """ @@ -666,7 +666,7 @@ class SensitivitySpecificityBase(Metric): return ops.where(feasible_exists, max_dependent, 0.0) -@keras_core_export("keras_core.metrics.SensitivityAtSpecificity") +@keras_export("keras.metrics.SensitivityAtSpecificity") class SensitivityAtSpecificity(SensitivitySpecificityBase): """Computes best sensitivity where specificity is >= specified value. @@ -704,7 +704,7 @@ class SensitivityAtSpecificity(SensitivitySpecificityBase): Standalone usage: - >>> m = keras_core.metrics.SensitivityAtSpecificity(0.5) + >>> m = keras.metrics.SensitivityAtSpecificity(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result() 0.5 @@ -721,7 +721,7 @@ class SensitivityAtSpecificity(SensitivitySpecificityBase): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.SensitivityAtSpecificity()]) + metrics=[keras.metrics.SensitivityAtSpecificity()]) ``` """ @@ -770,7 +770,7 @@ class SensitivityAtSpecificity(SensitivitySpecificityBase): return {**base_config, **config} -@keras_core_export("keras_core.metrics.SpecificityAtSensitivity") +@keras_export("keras.metrics.SpecificityAtSensitivity") class SpecificityAtSensitivity(SensitivitySpecificityBase): """Computes best specificity where sensitivity is >= specified value. 
@@ -808,7 +808,7 @@ class SpecificityAtSensitivity(SensitivitySpecificityBase): Standalone usage: - >>> m = keras_core.metrics.SpecificityAtSensitivity(0.5) + >>> m = keras.metrics.SpecificityAtSensitivity(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result() 0.66666667 @@ -825,7 +825,7 @@ class SpecificityAtSensitivity(SensitivitySpecificityBase): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.SpecificityAtSensitivity()]) + metrics=[keras.metrics.SpecificityAtSensitivity()]) ``` """ @@ -874,7 +874,7 @@ class SpecificityAtSensitivity(SensitivitySpecificityBase): return {**base_config, **config} -@keras_core_export("keras_core.metrics.PrecisionAtRecall") +@keras_export("keras.metrics.PrecisionAtRecall") class PrecisionAtRecall(SensitivitySpecificityBase): """Computes best precision where recall is >= specified value. @@ -903,7 +903,7 @@ class PrecisionAtRecall(SensitivitySpecificityBase): Standalone usage: - >>> m = keras_core.metrics.PrecisionAtRecall(0.5) + >>> m = keras.metrics.PrecisionAtRecall(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result() 0.5 @@ -920,7 +920,7 @@ class PrecisionAtRecall(SensitivitySpecificityBase): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.PrecisionAtRecall(recall=0.8)]) + metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)]) ``` """ @@ -961,7 +961,7 @@ class PrecisionAtRecall(SensitivitySpecificityBase): return {**base_config, **config} -@keras_core_export("keras_core.metrics.RecallAtPrecision") +@keras_export("keras.metrics.RecallAtPrecision") class RecallAtPrecision(SensitivitySpecificityBase): """Computes best recall where precision is >= specified value. 
@@ -993,7 +993,7 @@ class RecallAtPrecision(SensitivitySpecificityBase): Standalone usage: - >>> m = keras_core.metrics.RecallAtPrecision(0.8) + >>> m = keras.metrics.RecallAtPrecision(0.8) >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) >>> m.result() 0.5 @@ -1010,7 +1010,7 @@ class RecallAtPrecision(SensitivitySpecificityBase): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.RecallAtPrecision(precision=0.8)]) + metrics=[keras.metrics.RecallAtPrecision(precision=0.8)]) ``` """ @@ -1059,7 +1059,7 @@ class RecallAtPrecision(SensitivitySpecificityBase): return {**base_config, **config} -@keras_core_export("keras_core.metrics.AUC") +@keras_export("keras.metrics.AUC") class AUC(Metric): """Approximates the AUC (Area under the curve) of the ROC or PR curves. @@ -1149,7 +1149,7 @@ class AUC(Metric): Standalone usage: - >>> m = keras_core.metrics.AUC(num_thresholds=3) + >>> m = keras.metrics.AUC(num_thresholds=3) >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7] >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2] @@ -1170,13 +1170,13 @@ class AUC(Metric): ```python # Reports the AUC of a model outputting a probability. model.compile(optimizer='sgd', - loss=keras_core.losses.BinaryCrossentropy(), - metrics=[keras_core.metrics.AUC()]) + loss=keras.losses.BinaryCrossentropy(), + metrics=[keras.metrics.AUC()]) # Reports the AUC of a model outputting a logit. 
model.compile(optimizer='sgd', - loss=keras_core.losses.BinaryCrossentropy(from_logits=True), - metrics=[keras_core.metrics.AUC(from_logits=True)]) + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.AUC(from_logits=True)]) ``` """ diff --git a/keras_core/metrics/confusion_metrics_test.py b/keras/metrics/confusion_metrics_test.py similarity index 99% rename from keras_core/metrics/confusion_metrics_test.py rename to keras/metrics/confusion_metrics_test.py index 9f83b23f3..1be72a57a 100644 --- a/keras_core/metrics/confusion_metrics_test.py +++ b/keras/metrics/confusion_metrics_test.py @@ -6,12 +6,12 @@ from absl import logging from absl.testing import parameterized from tensorflow.python.ops.numpy_ops import np_config -from keras_core import layers -from keras_core import metrics -from keras_core import models -from keras_core import ops -from keras_core import testing -from keras_core.metrics import metrics_utils +from keras import layers +from keras import metrics +from keras import models +from keras import ops +from keras import testing +from keras.metrics import metrics_utils # TODO: remove reliance on this (or alternatively, turn it on by default). # This is no longer needed with tf-nightly. 
@@ -1116,7 +1116,7 @@ class RecallAtPrecisionTest(testing.TestCase, parameterized.TestCase): @pytest.mark.requires_trainable_backend def test_end_to_end(self): - # Test for https://github.com/keras-team/keras-core/issues/718 + # Test for https://github.com/keras-team/keras/issues/718 model = models.Sequential( [ layers.Input((1,)), diff --git a/keras_core/metrics/f_score_metrics.py b/keras/metrics/f_score_metrics.py similarity index 96% rename from keras_core/metrics/f_score_metrics.py rename to keras/metrics/f_score_metrics.py index 45bc7ac86..2bac6d443 100644 --- a/keras_core/metrics/f_score_metrics.py +++ b/keras/metrics/f_score_metrics.py @@ -1,11 +1,11 @@ -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.metrics.metric import Metric +from keras import backend +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.metrics.metric import Metric -@keras_core_export("keras_core.metrics.FBetaScore") +@keras_export("keras.metrics.FBetaScore") class FBetaScore(Metric): """Computes F-Beta score. @@ -50,7 +50,7 @@ class FBetaScore(Metric): Example: - >>> metric = keras_core.metrics.FBetaScore(beta=2.0, threshold=0.5) + >>> metric = keras.metrics.FBetaScore(beta=2.0, threshold=0.5) >>> y_true = np.array([[1, 1, 1], ... [1, 0, 0], ... [1, 1, 0]], np.int32) @@ -245,7 +245,7 @@ class FBetaScore(Metric): v.assign(ops.zeros(v.shape, dtype=v.dtype)) -@keras_core_export("keras_core.metrics.F1Score") +@keras_export("keras.metrics.F1Score") class F1Score(FBetaScore): r"""Computes F-1 Score. @@ -285,7 +285,7 @@ class F1Score(FBetaScore): Example: - >>> metric = keras_core.metrics.F1Score(threshold=0.5) + >>> metric = keras.metrics.F1Score(threshold=0.5) >>> y_true = np.array([[1, 1, 1], ... [1, 0, 0], ... 
[1, 1, 0]], np.int32) diff --git a/keras_core/metrics/f_score_metrics_test.py b/keras/metrics/f_score_metrics_test.py similarity index 99% rename from keras_core/metrics/f_score_metrics_test.py rename to keras/metrics/f_score_metrics_test.py index 8ee61a8ab..dccf005b9 100644 --- a/keras_core/metrics/f_score_metrics_test.py +++ b/keras/metrics/f_score_metrics_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras_core import testing -from keras_core.metrics import f_score_metrics +from keras import testing +from keras.metrics import f_score_metrics class FBetaScoreTest(parameterized.TestCase, testing.TestCase): diff --git a/keras_core/metrics/hinge_metrics.py b/keras/metrics/hinge_metrics.py similarity index 82% rename from keras_core/metrics/hinge_metrics.py rename to keras/metrics/hinge_metrics.py index 881aa1a4f..4d6ad41bd 100644 --- a/keras_core/metrics/hinge_metrics.py +++ b/keras/metrics/hinge_metrics.py @@ -1,11 +1,11 @@ -from keras_core.api_export import keras_core_export -from keras_core.losses.losses import categorical_hinge -from keras_core.losses.losses import hinge -from keras_core.losses.losses import squared_hinge -from keras_core.metrics import reduction_metrics +from keras.api_export import keras_export +from keras.losses.losses import categorical_hinge +from keras.losses.losses import hinge +from keras.losses.losses import squared_hinge +from keras.metrics import reduction_metrics -@keras_core_export("keras_core.metrics.Hinge") +@keras_export("keras.metrics.Hinge") class Hinge(reduction_metrics.MeanMetricWrapper): """Computes the hinge metric between `y_true` and `y_pred`. 
@@ -18,7 +18,7 @@ class Hinge(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.Hinge() + >>> m = keras.metrics.Hinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 1.3 @@ -36,7 +36,7 @@ class Hinge(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.SquaredHinge") +@keras_export("keras.metrics.SquaredHinge") class SquaredHinge(reduction_metrics.MeanMetricWrapper): """Computes the hinge metric between `y_true` and `y_pred`. @@ -49,7 +49,7 @@ class SquaredHinge(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.SquaredHinge() + >>> m = keras.metrics.SquaredHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 1.86 @@ -67,7 +67,7 @@ class SquaredHinge(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.CategoricalHinge") +@keras_export("keras.metrics.CategoricalHinge") class CategoricalHinge(reduction_metrics.MeanMetricWrapper): """Computes the categorical hinge metric between `y_true` and `y_pred`. @@ -76,7 +76,7 @@ class CategoricalHinge(reduction_metrics.MeanMetricWrapper): dtype: (Optional) data type of the metric result. 
Standalone usage: - >>> m = keras_core.metrics.CategoricalHinge() + >>> m = keras.metrics.CategoricalHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.4000001 diff --git a/keras_core/metrics/hinge_metrics_test.py b/keras/metrics/hinge_metrics_test.py similarity index 98% rename from keras_core/metrics/hinge_metrics_test.py rename to keras/metrics/hinge_metrics_test.py index e4aa66c18..536516af1 100644 --- a/keras_core/metrics/hinge_metrics_test.py +++ b/keras/metrics/hinge_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.metrics import hinge_metrics +from keras import testing +from keras.metrics import hinge_metrics class HingeTest(testing.TestCase): diff --git a/keras_core/metrics/iou_metrics.py b/keras/metrics/iou_metrics.py similarity index 95% rename from keras_core/metrics/iou_metrics.py rename to keras/metrics/iou_metrics.py index fbec25aad..a41faec9b 100644 --- a/keras_core/metrics/iou_metrics.py +++ b/keras/metrics/iou_metrics.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.metrics.metric import Metric -from keras_core.metrics.metrics_utils import confusion_matrix +from keras import backend +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.metrics.metric import Metric +from keras.metrics.metrics_utils import confusion_matrix class _IoUBase(Metric): @@ -138,7 +138,7 @@ class _IoUBase(Metric): ) -@keras_core_export("keras_core.metrics.IoU") +@keras_export("keras.metrics.IoU") class IoU(_IoUBase): """Computes the Intersection-Over-Union metric for specific target classes. 
@@ -191,7 +191,7 @@ class IoU(_IoUBase): >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] >>> # iou = true_positives / (sum_row + sum_col - true_positives)) >>> # iou = [0.33, 0.33] - >>> m = keras_core.metrics.IoU(num_classes=2, target_class_ids=[0]) + >>> m = keras.metrics.IoU(num_classes=2, target_class_ids=[0]) >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1]) >>> m.result() 0.33333334 @@ -213,7 +213,7 @@ class IoU(_IoUBase): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.IoU(num_classes=2, target_class_ids=[0])]) + metrics=[keras.metrics.IoU(num_classes=2, target_class_ids=[0])]) ``` """ @@ -295,7 +295,7 @@ class IoU(_IoUBase): return dict(list(base_config.items()) + list(config.items())) -@keras_core_export("keras_core.metrics.BinaryIoU") +@keras_export("keras.metrics.BinaryIoU") class BinaryIoU(IoU): """Computes the Intersection-Over-Union metric for class 0 and/or 1. @@ -340,7 +340,7 @@ class BinaryIoU(IoU): Standalone usage: - >>> m = keras_core.metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3) + >>> m = keras.metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3) >>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7]) >>> m.result() 0.33333334 @@ -362,7 +362,7 @@ class BinaryIoU(IoU): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.BinaryIoU( + metrics=[keras.metrics.BinaryIoU( target_class_ids=[0], threshold=0.5 )] @@ -417,7 +417,7 @@ class BinaryIoU(IoU): } -@keras_core_export("keras_core.metrics.MeanIoU") +@keras_export("keras.metrics.MeanIoU") class MeanIoU(IoU): """Computes the mean Intersection-Over-Union metric. 
@@ -466,7 +466,7 @@ class MeanIoU(IoU): >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] >>> # iou = true_positives / (sum_row + sum_col - true_positives)) >>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33 - >>> m = keras_core.metrics.MeanIoU(num_classes=2) + >>> m = keras.metrics.MeanIoU(num_classes=2) >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1]) >>> m.result() 0.33333334 @@ -483,7 +483,7 @@ class MeanIoU(IoU): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.MeanIoU(num_classes=2)]) + metrics=[keras.metrics.MeanIoU(num_classes=2)]) ``` """ @@ -521,7 +521,7 @@ class MeanIoU(IoU): } -@keras_core_export("keras_core.metrics.OneHotIoU") +@keras_export("keras.metrics.OneHotIoU") class OneHotIoU(IoU): """Computes the Intersection-Over-Union metric for one-hot encoded labels. @@ -578,7 +578,7 @@ class OneHotIoU(IoU): >>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], ... [0.1, 0.4, 0.5]]) >>> sample_weight = [0.1, 0.2, 0.3, 0.4] - >>> m = keras_core.metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2]) + >>> m = keras.metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2]) >>> m.update_state( ... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight) >>> # cm = [[0, 0, 0.2+0.4], @@ -597,7 +597,7 @@ class OneHotIoU(IoU): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.OneHotIoU( + metrics=[keras.metrics.OneHotIoU( num_classes=3, target_class_id=[1] )] @@ -638,7 +638,7 @@ class OneHotIoU(IoU): } -@keras_core_export("keras_core.metrics.OneHotMeanIoU") +@keras_export("keras.metrics.OneHotMeanIoU") class OneHotMeanIoU(MeanIoU): """Computes mean Intersection-Over-Union metric for one-hot encoded labels. @@ -694,7 +694,7 @@ class OneHotMeanIoU(MeanIoU): >>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], ... 
[0.1, 0.4, 0.5]]) >>> sample_weight = [0.1, 0.2, 0.3, 0.4] - >>> m = keras_core.metrics.OneHotMeanIoU(num_classes=3) + >>> m = keras.metrics.OneHotMeanIoU(num_classes=3) >>> m.update_state( ... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight) >>> # cm = [[0, 0, 0.2+0.4], @@ -713,7 +713,7 @@ class OneHotMeanIoU(MeanIoU): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.OneHotMeanIoU(num_classes=3)]) + metrics=[keras.metrics.OneHotMeanIoU(num_classes=3)]) ``` """ diff --git a/keras_core/metrics/iou_metrics_test.py b/keras/metrics/iou_metrics_test.py similarity index 99% rename from keras_core/metrics/iou_metrics_test.py rename to keras/metrics/iou_metrics_test.py index 5bdb1c15d..2da7727df 100644 --- a/keras_core/metrics/iou_metrics_test.py +++ b/keras/metrics/iou_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.metrics import iou_metrics as metrics +from keras import testing +from keras.metrics import iou_metrics as metrics class IoUTest(testing.TestCase): diff --git a/keras_core/metrics/metric.py b/keras/metrics/metric.py similarity index 90% rename from keras_core/metrics/metric.py rename to keras/metrics/metric.py index fa98585a3..27796ee6a 100644 --- a/keras_core/metrics/metric.py +++ b/keras/metrics/metric.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.utils.naming import auto_name -from keras_core.utils.tracking import Tracker +from keras import backend +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.utils.naming import auto_name +from keras.utils.tracking import Tracker -@keras_core_export(["keras_core.Metric", "keras_core.metrics.Metric"]) +@keras_export(["keras.Metric", "keras.metrics.Metric"]) class Metric: """Encapsulates metric logic and state. 
@@ -26,14 +26,14 @@ class Metric: Usage with `compile()` API: ```python - model = keras_core.Sequential() - model.add(keras_core.layers.Dense(64, activation='relu')) - model.add(keras_core.layers.Dense(64, activation='relu')) - model.add(keras_core.layers.Dense(10, activation='softmax')) + model = keras.Sequential() + model.add(keras.layers.Dense(64, activation='relu')) + model.add(keras.layers.Dense(64, activation='relu')) + model.add(keras.layers.Dense(10, activation='softmax')) - model.compile(optimizer=keras_core.optimizers.RMSprop(0.01), - loss=keras_core.losses.CategoricalCrossentropy(), - metrics=[keras_core.metrics.CategoricalAccuracy()]) + model.compile(optimizer=keras.optimizers.RMSprop(0.01), + loss=keras.losses.CategoricalCrossentropy(), + metrics=[keras.metrics.CategoricalAccuracy()]) data = np.random.random((1000, 32)) labels = np.random.random((1000, 10)) diff --git a/keras_core/metrics/metric_test.py b/keras/metrics/metric_test.py similarity index 96% rename from keras_core/metrics/metric_test.py rename to keras/metrics/metric_test.py index 3b794e6e6..c14020a17 100644 --- a/keras_core/metrics/metric_test.py +++ b/keras/metrics/metric_test.py @@ -1,11 +1,11 @@ import numpy as np -from keras_core import backend -from keras_core import initializers -from keras_core import metrics as metrics_module -from keras_core import ops -from keras_core import testing -from keras_core.metrics.metric import Metric +from keras import backend +from keras import initializers +from keras import metrics as metrics_module +from keras import ops +from keras import testing +from keras.metrics.metric import Metric class ExampleMetric(Metric): diff --git a/keras_core/metrics/metrics_utils.py b/keras/metrics/metrics_utils.py similarity index 99% rename from keras_core/metrics/metrics_utils.py rename to keras/metrics/metrics_utils.py index 3aaea8329..497b0e7be 100644 --- a/keras_core/metrics/metrics_utils.py +++ b/keras/metrics/metrics_utils.py @@ -2,10 +2,10 @@ from enum 
import Enum import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core.losses.loss import squeeze_to_same_rank -from keras_core.utils.python_utils import to_list +from keras import backend +from keras import ops +from keras.losses.loss import squeeze_to_same_rank +from keras.utils.python_utils import to_list NEG_INF = -1e10 @@ -638,7 +638,7 @@ def confusion_matrix( For example: ```python - keras_core.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> + keras.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> [[0 0 0 0 0] [0 0 1 0 0] [0 0 1 0 0] diff --git a/keras_core/metrics/probabilistic_metrics.py b/keras/metrics/probabilistic_metrics.py similarity index 87% rename from keras_core/metrics/probabilistic_metrics.py rename to keras/metrics/probabilistic_metrics.py index 399b38ef9..dcba9fec7 100644 --- a/keras_core/metrics/probabilistic_metrics.py +++ b/keras/metrics/probabilistic_metrics.py @@ -1,13 +1,13 @@ -from keras_core.api_export import keras_core_export -from keras_core.losses.losses import binary_crossentropy -from keras_core.losses.losses import categorical_crossentropy -from keras_core.losses.losses import kl_divergence -from keras_core.losses.losses import poisson -from keras_core.losses.losses import sparse_categorical_crossentropy -from keras_core.metrics import reduction_metrics +from keras.api_export import keras_export +from keras.losses.losses import binary_crossentropy +from keras.losses.losses import categorical_crossentropy +from keras.losses.losses import kl_divergence +from keras.losses.losses import poisson +from keras.losses.losses import sparse_categorical_crossentropy +from keras.metrics import reduction_metrics -@keras_core_export("keras_core.metrics.KLDivergence") +@keras_export("keras.metrics.KLDivergence") class KLDivergence(reduction_metrics.MeanMetricWrapper): """Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`. 
@@ -24,7 +24,7 @@ class KLDivergence(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.KLDivergence() + >>> m = keras.metrics.KLDivergence() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 0.45814306 @@ -40,7 +40,7 @@ class KLDivergence(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.KLDivergence()]) + metrics=[keras.metrics.KLDivergence()]) ``` """ @@ -51,7 +51,7 @@ class KLDivergence(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.Poisson") +@keras_export("keras.metrics.Poisson") class Poisson(reduction_metrics.MeanMetricWrapper): """Computes the Poisson metric between `y_true` and `y_pred`. @@ -69,7 +69,7 @@ class Poisson(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.Poisson() + >>> m = keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.49999997 @@ -85,7 +85,7 @@ class Poisson(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.Poisson()]) + metrics=[keras.metrics.Poisson()]) ``` """ @@ -96,7 +96,7 @@ class Poisson(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.BinaryCrossentropy") +@keras_export("keras.metrics.BinaryCrossentropy") class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. 
@@ -119,7 +119,7 @@ class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.BinaryCrossentropy() + >>> m = keras.metrics.BinaryCrossentropy() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 0.81492424 @@ -136,7 +136,7 @@ class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.BinaryCrossentropy()]) + metrics=[keras.metrics.BinaryCrossentropy()]) ``` """ @@ -166,7 +166,7 @@ class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): } -@keras_core_export("keras_core.metrics.CategoricalCrossentropy") +@keras_export("keras.metrics.CategoricalCrossentropy") class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. @@ -200,7 +200,7 @@ class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): >>> # = -((log 0.95), (log 0.1)) >>> # = [0.051, 2.302] >>> # Reduced xent = (0.051 + 2.302) / 2 - >>> m = keras_core.metrics.CategoricalCrossentropy() + >>> m = keras.metrics.CategoricalCrossentropy() >>> m.update_state([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result() @@ -219,7 +219,7 @@ class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.CategoricalCrossentropy()]) + metrics=[keras.metrics.CategoricalCrossentropy()]) ``` """ @@ -253,7 +253,7 @@ class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): } -@keras_core_export("keras_core.metrics.SparseCategoricalCrossentropy") +@keras_export("keras.metrics.SparseCategoricalCrossentropy") class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. 
@@ -288,7 +288,7 @@ class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] >>> # xent = [0.0513, 2.3026] >>> # Reduced xent = (0.0513 + 2.3026) / 2 - >>> m = keras_core.metrics.SparseCategoricalCrossentropy() + >>> m = keras.metrics.SparseCategoricalCrossentropy() >>> m.update_state([1, 2], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result() @@ -307,7 +307,7 @@ class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.SparseCategoricalCrossentropy()]) + metrics=[keras.metrics.SparseCategoricalCrossentropy()]) ``` """ diff --git a/keras_core/metrics/probabilistic_metrics_test.py b/keras/metrics/probabilistic_metrics_test.py similarity index 99% rename from keras_core/metrics/probabilistic_metrics_test.py rename to keras/metrics/probabilistic_metrics_test.py index 8b9ff75ca..1ff76a846 100644 --- a/keras_core/metrics/probabilistic_metrics_test.py +++ b/keras/metrics/probabilistic_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import metrics -from keras_core import testing +from keras import metrics +from keras import testing class KLDivergenceTest(testing.TestCase): diff --git a/keras_core/metrics/reduction_metrics.py b/keras/metrics/reduction_metrics.py similarity index 93% rename from keras_core/metrics/reduction_metrics.py rename to keras/metrics/reduction_metrics.py index d6fea82ee..4199b9c08 100644 --- a/keras_core/metrics/reduction_metrics.py +++ b/keras/metrics/reduction_metrics.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.losses import loss -from keras_core.metrics.metric import Metric -from keras_core.saving import serialization_lib +from keras import backend +from keras import initializers +from keras import ops +from 
keras.api_export import keras_export +from keras.losses import loss +from keras.metrics.metric import Metric +from keras.saving import serialization_lib def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype): @@ -38,7 +38,7 @@ def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype): return values, sample_weight -@keras_core_export("keras_core.metrics.Sum") +@keras_export("keras.metrics.Sum") class Sum(Metric): """Compute the (weighted) sum of the given values. @@ -87,7 +87,7 @@ class Sum(Metric): return ops.cast(self.total, self.dtype) -@keras_core_export("keras_core.metrics.Mean") +@keras_export("keras.metrics.Mean") class Mean(Metric): """Compute the (weighted) mean of the given values. @@ -155,7 +155,7 @@ class Mean(Metric): ) -@keras_core_export("keras_core.metrics.MeanMetricWrapper") +@keras_export("keras.metrics.MeanMetricWrapper") class MeanMetricWrapper(Mean): """Wrap a stateless metric function with the Mean metric. diff --git a/keras_core/metrics/reduction_metrics_test.py b/keras/metrics/reduction_metrics_test.py similarity index 97% rename from keras_core/metrics/reduction_metrics_test.py rename to keras/metrics/reduction_metrics_test.py index c9acc4d5e..6fe94654d 100644 --- a/keras_core/metrics/reduction_metrics_test.py +++ b/keras/metrics/reduction_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.metrics import reduction_metrics +from keras import testing +from keras.metrics import reduction_metrics class SumTest(testing.TestCase): diff --git a/keras_core/metrics/regression_metrics.py b/keras/metrics/regression_metrics.py similarity index 89% rename from keras_core/metrics/regression_metrics.py rename to keras/metrics/regression_metrics.py index a0e196eb0..fb549e8f5 100644 --- a/keras_core/metrics/regression_metrics.py +++ b/keras/metrics/regression_metrics.py @@ -1,19 +1,19 @@ import warnings -from keras_core import initializers -from keras_core import ops -from 
keras_core.api_export import keras_core_export -from keras_core.losses.loss import squeeze_to_same_rank -from keras_core.losses.losses import log_cosh -from keras_core.losses.losses import mean_absolute_error -from keras_core.losses.losses import mean_absolute_percentage_error -from keras_core.losses.losses import mean_squared_error -from keras_core.losses.losses import mean_squared_logarithmic_error -from keras_core.metrics import reduction_metrics -from keras_core.utils.numerical_utils import normalize +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.losses.loss import squeeze_to_same_rank +from keras.losses.losses import log_cosh +from keras.losses.losses import mean_absolute_error +from keras.losses.losses import mean_absolute_percentage_error +from keras.losses.losses import mean_squared_error +from keras.losses.losses import mean_squared_logarithmic_error +from keras.metrics import reduction_metrics +from keras.utils.numerical_utils import normalize -@keras_core_export("keras_core.metrics.MeanSquaredError") +@keras_export("keras.metrics.MeanSquaredError") class MeanSquaredError(reduction_metrics.MeanMetricWrapper): """Computes the mean squared error between `y_true` and `y_pred`. @@ -29,7 +29,7 @@ class MeanSquaredError(reduction_metrics.MeanMetricWrapper): Example: - >>> m = keras_core.metrics.MeanSquaredError() + >>> m = keras.metrics.MeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.25 @@ -42,7 +42,7 @@ class MeanSquaredError(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.MeanAbsoluteError") +@keras_export("keras.metrics.MeanAbsoluteError") class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper): """Computes the mean absolute error between the labels and predictions. 
@@ -60,7 +60,7 @@ class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.MeanAbsoluteError() + >>> m = keras.metrics.MeanAbsoluteError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.25 @@ -76,7 +76,7 @@ class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.MeanAbsoluteError()]) + metrics=[keras.metrics.MeanAbsoluteError()]) ``` """ @@ -87,7 +87,7 @@ class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.MeanAbsolutePercentageError") +@keras_export("keras.metrics.MeanAbsolutePercentageError") class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): """Computes mean absolute percentage error between `y_true` and `y_pred`. @@ -105,7 +105,7 @@ class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.MeanAbsolutePercentageError() + >>> m = keras.metrics.MeanAbsolutePercentageError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 250000000.0 @@ -121,7 +121,7 @@ class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.MeanAbsolutePercentageError()]) + metrics=[keras.metrics.MeanAbsolutePercentageError()]) ``` """ @@ -132,7 +132,7 @@ class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.MeanSquaredLogarithmicError") +@keras_export("keras.metrics.MeanSquaredLogarithmicError") class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper): """Computes mean squared logarithmic error between `y_true` and `y_pred`. 
@@ -150,7 +150,7 @@ class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.MeanSquaredLogarithmicError() + >>> m = keras.metrics.MeanSquaredLogarithmicError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.12011322 @@ -166,7 +166,7 @@ class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.MeanSquaredLogarithmicError()]) + metrics=[keras.metrics.MeanSquaredLogarithmicError()]) ``` """ @@ -177,7 +177,7 @@ class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.RootMeanSquaredError") +@keras_export("keras.metrics.RootMeanSquaredError") class RootMeanSquaredError(reduction_metrics.Mean): """Computes root mean squared error metric between `y_true` and `y_pred`. @@ -195,7 +195,7 @@ class RootMeanSquaredError(reduction_metrics.Mean): Standalone usage: - >>> m = keras_core.metrics.RootMeanSquaredError() + >>> m = keras.metrics.RootMeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.5 @@ -212,7 +212,7 @@ class RootMeanSquaredError(reduction_metrics.Mean): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.RootMeanSquaredError()]) + metrics=[keras.metrics.RootMeanSquaredError()]) ``` """ @@ -243,7 +243,7 @@ class RootMeanSquaredError(reduction_metrics.Mean): return ops.sqrt(super().result()) -@keras_core_export("keras_core.metrics.CosineSimilarity") +@keras_export("keras.metrics.CosineSimilarity") class CosineSimilarity(reduction_metrics.MeanMetricWrapper): """Computes the cosine similarity between the labels and predictions. @@ -271,7 +271,7 @@ class CosineSimilarity(reduction_metrics.MeanMetricWrapper): >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # result = mean(sum(l2_norm(y_true) . 
l2_norm(y_pred), axis=1)) >>> # = ((0. + 0.) + (0.5 + 0.5)) / 2 - >>> m = keras_core.metrics.CosineSimilarity(axis=1) + >>> m = keras.metrics.CosineSimilarity(axis=1) >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]) >>> m.result() 0.49999997 @@ -287,7 +287,7 @@ class CosineSimilarity(reduction_metrics.MeanMetricWrapper): model.compile( optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.CosineSimilarity(axis=1)]) + metrics=[keras.metrics.CosineSimilarity(axis=1)]) ``` """ @@ -298,7 +298,7 @@ class CosineSimilarity(reduction_metrics.MeanMetricWrapper): return {"name": self.name, "dtype": self.dtype} -@keras_core_export("keras_core.metrics.LogCoshError") +@keras_export("keras.metrics.LogCoshError") class LogCoshError(reduction_metrics.MeanMetricWrapper): """Computes the logarithm of the hyperbolic cosine of the prediction error. @@ -317,7 +317,7 @@ class LogCoshError(reduction_metrics.MeanMetricWrapper): Standalone usage: - >>> m = keras_core.metrics.LogCoshError() + >>> m = keras.metrics.LogCoshError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.10844523 @@ -332,7 +332,7 @@ class LogCoshError(reduction_metrics.MeanMetricWrapper): ```python model.compile(optimizer='sgd', loss='mse', - metrics=[keras_core.metrics.LogCoshError()]) + metrics=[keras.metrics.LogCoshError()]) ``` """ @@ -344,7 +344,7 @@ class LogCoshError(reduction_metrics.MeanMetricWrapper): # Adapted from TF-Addons implementation (RSquare class). -@keras_core_export("keras_core.metrics.R2Score") +@keras_export("keras.metrics.R2Score") class R2Score(reduction_metrics.Metric): """Computes R2 score. 
@@ -389,7 +389,7 @@ class R2Score(reduction_metrics.Metric): >>> y_true = np.array([[1], [4], [3]], dtype=np.float32) >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32) - >>> metric = keras_core.metrics.R2Score() + >>> metric = keras.metrics.R2Score() >>> metric.update_state(y_true, y_pred) >>> result = metric.result() >>> result @@ -590,7 +590,7 @@ def cosine_similarity(y_true, y_pred, axis=-1): >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] - >>> loss = keras_core.losses.cosine_similarity(y_true, y_pred, axis=-1) + >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1) [0., 0.99999994, -0.99999994] """ y_pred = ops.convert_to_tensor(y_pred) diff --git a/keras_core/metrics/regression_metrics_test.py b/keras/metrics/regression_metrics_test.py similarity index 99% rename from keras_core/metrics/regression_metrics_test.py rename to keras/metrics/regression_metrics_test.py index 316deddbc..7a3dbc5b5 100644 --- a/keras_core/metrics/regression_metrics_test.py +++ b/keras/metrics/regression_metrics_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras_core import testing -from keras_core.metrics import regression_metrics as metrics +from keras import testing +from keras.metrics import regression_metrics as metrics class MeanSquaredErrorTest(testing.TestCase): diff --git a/keras_core/mixed_precision/__init__.py b/keras/mixed_precision/__init__.py similarity index 68% rename from keras_core/mixed_precision/__init__.py rename to keras/mixed_precision/__init__.py index 2bb486125..669d1540a 100644 --- a/keras_core/mixed_precision/__init__.py +++ b/keras/mixed_precision/__init__.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core.mixed_precision.dtype_policy import DTypePolicy -from keras_core.mixed_precision.dtype_policy import dtype_policy -from keras_core.mixed_precision.dtype_policy import set_dtype_policy -from keras_core.saving import serialization_lib 
+from keras import backend +from keras.mixed_precision.dtype_policy import DTypePolicy +from keras.mixed_precision.dtype_policy import dtype_policy +from keras.mixed_precision.dtype_policy import set_dtype_policy +from keras.saving import serialization_lib def resolve_policy(identifier): diff --git a/keras_core/mixed_precision/dtype_policy.py b/keras/mixed_precision/dtype_policy.py similarity index 84% rename from keras_core/mixed_precision/dtype_policy.py rename to keras/mixed_precision/dtype_policy.py index e1f513ec9..8eaec757c 100644 --- a/keras_core/mixed_precision/dtype_policy.py +++ b/keras/mixed_precision/dtype_policy.py @@ -1,12 +1,12 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras import backend +from keras.api_export import keras_export +from keras.backend.common import global_state -@keras_core_export( +@keras_export( [ - "keras_core.mixed_precision.DTypePolicy", - "keras_core.mixed_precision.Policy", + "keras.mixed_precision.DTypePolicy", + "keras.mixed_precision.Policy", ] ) class DTypePolicy: @@ -15,7 +15,7 @@ class DTypePolicy: A dtype policy determines a layer's computation and variable dtypes. Each layer has a policy. Policies can be passed to the `dtype` argument of layer constructors, or a global policy can be set with - `keras_core.mixed_precision.set_dtype_policy`. + `keras.mixed_precision.set_dtype_policy`. Args: name: The policy name, which determines the compute and variable dtypes. @@ -30,23 +30,23 @@ class DTypePolicy: precision, which is the use of float16 or bfloat16 for computations and float32 for variables. This is why the term `mixed_precision` appears in the API name. Mixed precision can be enabled by passing `"mixed_float16"` or - `"mixed_bfloat16"` to `keras_core.mixed_precision.set_dtype_policy()`. + `"mixed_bfloat16"` to `keras.mixed_precision.set_dtype_policy()`. 
- >>> keras_core.mixed_precision.set_dtype_policy("mixed_float16") - >>> layer1 = keras_core.layers.Dense(10) + >>> keras.mixed_precision.set_dtype_policy("mixed_float16") + >>> layer1 = keras.layers.Dense(10) >>> layer1.dtype_policy # layer1 will automatically use mixed precision >>> # Can optionally override layer to use float32 >>> # instead of mixed precision. - >>> layer2 = keras_core.layers.Dense(10, dtype="float32") + >>> layer2 = keras.layers.Dense(10, dtype="float32") >>> layer2.dtype_policy >>> # Set policy back to initial float32. - >>> keras_core.mixed_precision.set_dtype_policy('float32') + >>> keras.mixed_precision.set_dtype_policy('float32') In the example above, passing `dtype="float32"` to the layer is equivalent to passing - `dtype=keras_core.mixed_precision.DTypePolicy("float32")`. + `dtype=keras.mixed_precision.DTypePolicy("float32")`. In general, passing a dtype policy name to a layer is equivalent to passing the corresponding policy, so it is never necessary to explicitly construct a `DTypePolicy` object. 
@@ -140,10 +140,10 @@ class DTypePolicy: return cls(**config) -@keras_core_export( +@keras_export( [ - "keras_core.mixed_precision.set_dtype_policy", - "keras_core.mixed_precision.set_global_policy", + "keras.mixed_precision.set_dtype_policy", + "keras.mixed_precision.set_global_policy", ] ) def set_dtype_policy(policy): @@ -151,7 +151,7 @@ def set_dtype_policy(policy): Example: - >>> keras_core.mixed_precision.set_dtype_policy("mixed_float16") + >>> keras.mixed_precision.set_dtype_policy("mixed_float16") """ if not isinstance(policy, DTypePolicy): if isinstance(policy, str): @@ -167,10 +167,10 @@ def set_dtype_policy(policy): global_state.set_global_attribute("dtype_policy", policy) -@keras_core_export( +@keras_export( [ - "keras_core.mixed_precision.dtype_policy", - "keras_core.mixed_precision.global_policy", + "keras.mixed_precision.dtype_policy", + "keras.mixed_precision.global_policy", ] ) def dtype_policy(): diff --git a/keras/models/__init__.py b/keras/models/__init__.py new file mode 100644 index 000000000..21d90b45f --- /dev/null +++ b/keras/models/__init__.py @@ -0,0 +1,3 @@ +from keras.models.functional import Functional +from keras.models.model import Model +from keras.models.sequential import Sequential diff --git a/keras_core/models/cloning.py b/keras/models/cloning.py similarity index 93% rename from keras_core/models/cloning.py rename to keras/models/cloning.py index 80e6cf6ad..d3dea57d9 100644 --- a/keras_core/models/cloning.py +++ b/keras/models/cloning.py @@ -1,17 +1,17 @@ import tree -from keras_core import backend -from keras_core import utils -from keras_core.api_export import keras_core_export -from keras_core.layers import Input -from keras_core.layers import InputLayer -from keras_core.models.functional import Functional -from keras_core.models.functional import functional_like_constructor -from keras_core.models.sequential import Sequential -from keras_core.saving import serialization_lib +from keras import backend +from keras import utils 
+from keras.api_export import keras_export +from keras.layers import Input +from keras.layers import InputLayer +from keras.models.functional import Functional +from keras.models.functional import functional_like_constructor +from keras.models.sequential import Sequential +from keras.saving import serialization_lib -@keras_core_export("keras_core.models.clone_model") +@keras_export("keras.models.clone_model") def clone_model(model, input_tensors=None, clone_function=None): """Clone a Functional or Sequential `Model` instance. @@ -55,10 +55,10 @@ def clone_model(model, input_tensors=None, clone_function=None): ```python # Create a test Sequential model. - model = keras_core.Sequential([ - keras_core.layers.Input(shape=(728,)), - keras_core.layers.Dense(32, activation='relu'), - keras_core.layers.Dense(1, activation='sigmoid'), + model = keras.Sequential([ + keras.layers.Input(shape=(728,)), + keras.layers.Dense(32, activation='relu'), + keras.layers.Dense(1, activation='sigmoid'), ]) # Create a copy of the test model (with freshly initialized weights). 
new_model = clone_model(model) diff --git a/keras_core/models/cloning_test.py b/keras/models/cloning_test.py similarity index 94% rename from keras_core/models/cloning_test.py rename to keras/models/cloning_test.py index 0df8e12a1..30aabcd2c 100644 --- a/keras_core/models/cloning_test.py +++ b/keras/models/cloning_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.models.cloning import clone_model +from keras import layers +from keras import models +from keras import testing +from keras.models.cloning import clone_model def get_functional_model(shared_layers=False): diff --git a/keras_core/models/functional.py b/keras/models/functional.py similarity index 93% rename from keras_core/models/functional.py rename to keras/models/functional.py index 5ca83f022..80d82a288 100644 --- a/keras_core/models/functional.py +++ b/keras/models/functional.py @@ -4,18 +4,18 @@ import warnings import tree -from keras_core import backend -from keras_core import ops -from keras_core.backend.common import global_state -from keras_core.layers.input_spec import InputSpec -from keras_core.layers.layer import Layer -from keras_core.legacy.saving import saving_utils -from keras_core.legacy.saving import serialization as legacy_serialization -from keras_core.models.model import Model -from keras_core.ops.function import Function -from keras_core.ops.function import make_node_key -from keras_core.saving import serialization_lib -from keras_core.utils import tracking +from keras import backend +from keras import ops +from keras.backend.common import global_state +from keras.layers.input_spec import InputSpec +from keras.layers.layer import Layer +from keras.legacy.saving import saving_utils +from keras.legacy.saving import serialization as legacy_serialization +from keras.models.model import Model +from keras.ops.function import Function 
+from keras.ops.function import make_node_key +from keras.saving import serialization_lib +from keras.utils import tracking class Functional(Function, Model): @@ -25,7 +25,7 @@ class Functional(Function, Model): and `Sequential` (a special case of `Functional`). A `Functional` model can be instantiated by passing two arguments to - `__init__()`. The first argument is the `keras_core.Input` objects + `__init__()`. The first argument is the `keras.Input` objects that represent the inputs to the model. The second argument specifies the output tensors that represent the outputs of this model. Both arguments can be a nested structure @@ -34,23 +34,23 @@ class Functional(Function, Model): Example: ``` - inputs = {'x1': keras_core.Input(shape=(10,), name='x1'), - 'x2': keras_core.Input(shape=(1,), name='x2')} - t = keras_core.layers.Dense(1, activation='relu')(inputs['x1']) - outputs = keras_core.layers.Add()([t, inputs['x2']]) - model = keras_core.Model(inputs, outputs) + inputs = {'x1': keras.Input(shape=(10,), name='x1'), + 'x2': keras.Input(shape=(1,), name='x2')} + t = keras.layers.Dense(1, activation='relu')(inputs['x1']) + outputs = keras.layers.Add()([t, inputs['x2']]) + model = keras.Model(inputs, outputs) ``` A `Functional` model constructed using the Functional API can also - include raw Keras Core ops. + include raw Keras 3 ops. 
Example: ```python - inputs = keras_core.Input(shape=(10,)) - x = keras_core.layers.Dense(1)(inputs) + inputs = keras.Input(shape=(10,)) + x = keras.layers.Dense(1)(inputs) outputs = ops.nn.relu(x) - model = keras_core.Model(inputs, outputs) + model = keras.Model(inputs, outputs) ``` A new `Functional` model can also be created by using the @@ -60,20 +60,20 @@ class Functional(Function, Model): Example: ```python - inputs = keras_core.Input(shape=(None, None, 3)) - processed = keras_core.layers.RandomCrop(width=32, height=32)(inputs) - conv = keras_core.layers.Conv2D(filters=2, kernel_size=3)(processed) - pooling = keras_core.layers.GlobalAveragePooling2D()(conv) - feature = keras_core.layers.Dense(10)(pooling) + inputs = keras.Input(shape=(None, None, 3)) + processed = keras.layers.RandomCrop(width=32, height=32)(inputs) + conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed) + pooling = keras.layers.GlobalAveragePooling2D()(conv) + feature = keras.layers.Dense(10)(pooling) - full_model = keras_core.Model(inputs, feature) - backbone = keras_core.Model(processed, conv) - activations = keras_core.Model(conv, feature) + full_model = keras.Model(inputs, feature) + backbone = keras.Model(processed, conv) + activations = keras.Model(conv, feature) ``` Note that the `backbone` and `activations` models are not - created with `keras_core.Input` objects, but with the tensors - that are originated from `keras_core.Input` objects. + created with `keras.Input` objects, but with the tensors + that are originated from `keras.Input` objects. Under the hood, the layers and weights will be shared across these models, so that user can train the `full_model`, and use `backbone` or `activations` to do feature extraction. @@ -82,8 +82,8 @@ class Functional(Function, Model): all the existing API. Args: - inputs: List of input tensors (must be created via `keras_core.Input()` - or originated from `keras_core.Input()`). 
+ inputs: List of input tensors (must be created via `keras.Input()` + or originated from `keras.Input()`). outputs: List of output tensors. name: String, optional. Name of the model. trainable: Boolean, optional. If the model's variables should be diff --git a/keras_core/models/functional_test.py b/keras/models/functional_test.py similarity index 97% rename from keras_core/models/functional_test.py rename to keras/models/functional_test.py index d5912acc0..fe89f3e29 100644 --- a/keras_core/models/functional_test.py +++ b/keras/models/functional_test.py @@ -3,13 +3,13 @@ import warnings import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing -from keras_core.layers.core.input_layer import Input -from keras_core.layers.input_spec import InputSpec -from keras_core.models import Functional -from keras_core.models import Model +from keras import backend +from keras import layers +from keras import testing +from keras.layers.core.input_layer import Input +from keras.layers.input_spec import InputSpec +from keras.models import Functional +from keras.models import Model class FunctionalTest(testing.TestCase): diff --git a/keras_core/models/model.py b/keras/models/model.py similarity index 85% rename from keras_core/models/model.py rename to keras/models/model.py index f2f18868a..7dc8641b6 100644 --- a/keras_core/models/model.py +++ b/keras/models/model.py @@ -3,36 +3,36 @@ import json import os import warnings -from keras_core import backend -from keras_core import utils -from keras_core.api_export import keras_core_export -from keras_core.layers.layer import Layer -from keras_core.legacy.saving import legacy_h5_format -from keras_core.models.variable_mapping import map_trackable_variables -from keras_core.saving import saving_api -from keras_core.saving import saving_lib -from keras_core.trainers import trainer as base_trainer -from keras_core.utils import io_utils -from keras_core.utils import 
summary_utils -from keras_core.utils import traceback_utils +from keras import backend +from keras import utils +from keras.api_export import keras_export +from keras.layers.layer import Layer +from keras.legacy.saving import legacy_h5_format +from keras.models.variable_mapping import map_trackable_variables +from keras.saving import saving_api +from keras.saving import saving_lib +from keras.trainers import trainer as base_trainer +from keras.utils import io_utils +from keras.utils import summary_utils +from keras.utils import traceback_utils if backend.backend() == "tensorflow": - from keras_core.backend.tensorflow.trainer import ( + from keras.backend.tensorflow.trainer import ( TensorFlowTrainer as Trainer, ) elif backend.backend() == "jax": - from keras_core.backend.jax.trainer import JAXTrainer as Trainer + from keras.backend.jax.trainer import JAXTrainer as Trainer elif backend.backend() == "torch": - from keras_core.backend.torch.trainer import TorchTrainer as Trainer + from keras.backend.torch.trainer import TorchTrainer as Trainer elif backend.backend() == "numpy": - from keras_core.backend.numpy.trainer import NumpyTrainer as Trainer + from keras.backend.numpy.trainer import NumpyTrainer as Trainer else: raise RuntimeError( f"Backend '{backend.backend()}' must implement the Trainer class." ) -@keras_core_export(["keras_core.Model", "keras_core.models.Model"]) +@keras_export(["keras.Model", "keras.models.Model"]) class Model(Trainer, Layer): """A model grouping layers into an object with training/inference features. 
@@ -45,10 +45,10 @@ class Model(Trainer, Layer): and finally you create your model from inputs and outputs: ```python - inputs = keras_core.Input(shape=(37,)) - x = keras_core.layers.Dense(32, activation="relu")(inputs) - outputs = keras_core.layers.Dense(5, activation="softmax")(x) - model = keras_core.Model(inputs=inputs, outputs=outputs) + inputs = keras.Input(shape=(37,)) + x = keras.layers.Dense(32, activation="relu")(inputs) + outputs = keras.layers.Dense(5, activation="softmax")(x) + model = keras.Model(inputs=inputs, outputs=outputs) ``` Note: Only dicts, lists, and tuples of input tensors are supported. Nested @@ -61,20 +61,20 @@ class Model(Trainer, Layer): Example: ```python - inputs = keras_core.Input(shape=(None, None, 3)) - processed = keras_core.layers.RandomCrop(width=128, height=128)(inputs) - conv = keras_core.layers.Conv2D(filters=32, kernel_size=3)(processed) - pooling = keras_core.layers.GlobalAveragePooling2D()(conv) - feature = keras_core.layers.Dense(10)(pooling) + inputs = keras.Input(shape=(None, None, 3)) + processed = keras.layers.RandomCrop(width=128, height=128)(inputs) + conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed) + pooling = keras.layers.GlobalAveragePooling2D()(conv) + feature = keras.layers.Dense(10)(pooling) - full_model = keras_core.Model(inputs, feature) - backbone = keras_core.Model(processed, conv) - activations = keras_core.Model(conv, feature) + full_model = keras.Model(inputs, feature) + backbone = keras.Model(processed, conv) + activations = keras.Model(conv, feature) ``` Note that the `backbone` and `activations` models are not - created with `keras_core.Input` objects, but with the tensors that originate - from `keras_core.Input` objects. Under the hood, the layers and weights will + created with `keras.Input` objects, but with the tensors that originate + from `keras.Input` objects. 
Under the hood, the layers and weights will be shared across these models, so that user can train the `full_model`, and use `backbone` or `activations` to do feature extraction. The inputs and outputs of the model can be nested structures of tensors as @@ -88,11 +88,11 @@ class Model(Trainer, Layer): in `call()`. ```python - class MyModel(keras_core.Model): + class MyModel(keras.Model): def __init__(self): super().__init__() - self.dense1 = keras_core.layers.Dense(32, activation="relu") - self.dense2 = keras_core.layers.Dense(5, activation="softmax") + self.dense1 = keras.layers.Dense(32, activation="relu") + self.dense2 = keras.layers.Dense(5, activation="softmax") def call(self, inputs): x = self.dense1(inputs) @@ -106,12 +106,12 @@ class Model(Trainer, Layer): a different behavior in training and inference: ```python - class MyModel(keras_core.Model): + class MyModel(keras.Model): def __init__(self): super().__init__() - self.dense1 = keras_core.layers.Dense(32, activation="relu") - self.dense2 = keras_core.layers.Dense(5, activation="softmax") - self.dropout = keras_core.layers.Dropout(0.5) + self.dense1 = keras.layers.Dense(32, activation="relu") + self.dense2 = keras.layers.Dense(5, activation="softmax") + self.dropout = keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) @@ -127,13 +127,13 @@ class Model(Trainer, Layer): ## With the `Sequential` class - In addition, `keras_core.Sequential` is a special case of model where + In addition, `keras.Sequential` is a special case of model where the model is purely a stack of single-input, single-output layers. 
```python - model = keras_core.Sequential([ - keras_core.Input(shape=(None, None, 3)), - keras_core.layers.Conv2D(filters=32, kernel_size=3), + model = keras.Sequential([ + keras.Input(shape=(None, None, 3)), + keras.layers.Conv2D(filters=32, kernel_size=3), ]) ``` """ @@ -141,14 +141,14 @@ class Model(Trainer, Layer): def __new__(cls, *args, **kwargs): # Signature detection for usage of `Model` as a `Functional` if functional_init_arguments(args, kwargs) and cls == Model: - from keras_core.models import functional + from keras.models import functional return functional.Functional(*args, **kwargs) return super().__new__(cls) def __init__(self, *args, **kwargs): Trainer.__init__(self) - from keras_core.models import functional + from keras.models import functional # Signature detection for usage of a `Model` subclass # as a `Functional` subclass @@ -279,20 +279,20 @@ class Model(Trainer, Layer): Example: ```python - model = keras_core.Sequential( + model = keras.Sequential( [ - keras_core.layers.Dense(5, input_shape=(3,)), - keras_core.layers.Softmax(), + keras.layers.Dense(5, input_shape=(3,)), + keras.layers.Softmax(), ], ) model.save("model.keras") - loaded_model = keras_core.saving.load_model("model.keras") + loaded_model = keras.saving.load_model("model.keras") x = keras.random.uniform((10, 3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ``` Note that `model.save()` is an alias for - `keras_core.saving.save_model()`. + `keras.saving.save_model()`. The saved `.keras` file contains: @@ -314,14 +314,14 @@ class Model(Trainer, Layer): filepath ).endswith(".keras"): warnings.warn( - "The `save_format` argument is deprecated in Keras Core. " + "The `save_format` argument is deprecated in Keras 3. " "We recommend removing this argument as it can be inferred " "from the file path. " f"Received: save_format={save_format}" ) else: raise ValueError( - "The `save_format` argument is deprecated in Keras Core. 
" + "The `save_format` argument is deprecated in Keras 3. " "Please remove this argument and pass a file path with " "either `.keras` or `.h5` extension." f"Received: save_format={save_format}" @@ -472,7 +472,7 @@ class Model(Trainer, Layer): Returns: A JSON string. """ - from keras_core.saving import serialization_lib + from keras.saving import serialization_lib model_config = serialization_lib.serialize_keras_object(self) return json.dumps(model_config, **kwargs) @@ -508,16 +508,16 @@ class Model(Trainer, Layer): ``` If you would like to customize your serving endpoints, you can - use the lower-level `keras_core.export.ExportArchive` class. The + use the lower-level `keras.export.ExportArchive` class. The `export()` method relies on `ExportArchive` internally. """ - from keras_core.export import export_lib + from keras.export import export_lib export_lib.export_model(self, filepath) @classmethod def from_config(cls, config, custom_objects=None): - from keras_core.models.functional import Functional + from keras.models.functional import Functional functional_config_keys = [ "name", @@ -540,7 +540,7 @@ class Model(Trainer, Layer): if is_functional_config and revivable_as_functional: # Revive Functional model # (but not Functional subclasses with a custom __init__) - from keras_core.models.functional import functional_from_config + from keras.models.functional import functional_from_config return functional_from_config( cls, config, custom_objects=custom_objects @@ -576,17 +576,17 @@ class Model(Trainer, Layer): return store -@keras_core_export("keras_core.models.model_from_json") +@keras_export("keras.models.model_from_json") def model_from_json(json_string, custom_objects=None): """Parses a JSON model configuration string and returns a model instance. Usage: - >>> model = keras_core.Sequential([ - ... keras_core.layers.Dense(5, input_shape=(3,)), - ... keras_core.layers.Softmax()]) + >>> model = keras.Sequential([ + ... 
keras.layers.Dense(5, input_shape=(3,)), + ... keras.layers.Softmax()]) >>> config = model.to_json() - >>> loaded_model = keras_core.models.model_from_json(config) + >>> loaded_model = keras.models.model_from_json(config) Args: json_string: JSON string encoding a model configuration. @@ -597,7 +597,7 @@ def model_from_json(json_string, custom_objects=None): Returns: A Keras model instance (uncompiled). """ - from keras_core.saving import serialization_lib + from keras.saving import serialization_lib model_config = json.loads(json_string) return serialization_lib.deserialize_keras_object( @@ -615,7 +615,7 @@ def functional_init_arguments(args, kwargs): def inject_functional_model_class(cls): """Inject `Functional` into the hierarchy of this class if needed.""" - from keras_core.models import functional + from keras.models import functional if cls == Model: return functional.Functional diff --git a/keras_core/models/model_test.py b/keras/models/model_test.py similarity index 98% rename from keras_core/models/model_test.py rename to keras/models/model_test.py index cc41b4cf5..c4b8c391b 100644 --- a/keras_core/models/model_test.py +++ b/keras/models/model_test.py @@ -2,12 +2,12 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import testing -from keras_core.layers.core.input_layer import Input -from keras_core.models.functional import Functional -from keras_core.models.model import Model -from keras_core.models.model import model_from_json +from keras import layers +from keras import testing +from keras.layers.core.input_layer import Input +from keras.models.functional import Functional +from keras.models.model import Model +from keras.models.model import model_from_json def _get_model(): @@ -78,7 +78,7 @@ class ModelTest(testing.TestCase, parameterized.TestCase): self.assertEqual(json_string, new_model.to_json()) def test_tuple_input_model_subclass(self): - # 
https://github.com/keras-team/keras-core/issues/324 + # https://github.com/keras-team/keras/issues/324 class MultiInputModel(Model): def __init__(self, **kwargs): diff --git a/keras_core/models/sequential.py b/keras/models/sequential.py similarity index 90% rename from keras_core/models/sequential.py rename to keras/models/sequential.py index 0ce0937d0..349b7674f 100644 --- a/keras_core/models/sequential.py +++ b/keras/models/sequential.py @@ -2,58 +2,58 @@ import copy import tree -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state -from keras_core.layers.core.input_layer import InputLayer -from keras_core.layers.layer import Layer -from keras_core.legacy.saving import saving_utils -from keras_core.legacy.saving import serialization as legacy_serialization -from keras_core.models.functional import Functional -from keras_core.models.model import Model -from keras_core.saving import serialization_lib +from keras.api_export import keras_export +from keras.backend.common import global_state +from keras.layers.core.input_layer import InputLayer +from keras.layers.layer import Layer +from keras.legacy.saving import saving_utils +from keras.legacy.saving import serialization as legacy_serialization +from keras.models.functional import Functional +from keras.models.model import Model +from keras.saving import serialization_lib -@keras_core_export(["keras_core.Sequential", "keras_core.models.Sequential"]) +@keras_export(["keras.Sequential", "keras.models.Sequential"]) class Sequential(Model): """`Sequential` groups a linear stack of layers into a `Model`. Examples: ```python - model = keras_core.Sequential() - model.add(keras_core.Input(shape=(16,))) - model.add(keras_core.layers.Dense(8)) + model = keras.Sequential() + model.add(keras.Input(shape=(16,))) + model.add(keras.layers.Dense(8)) # Note that you can also omit the initial `Input`. 
# In that case the model doesn't have any weights until the first call # to a training/evaluation method (since it isn't yet built): - model = keras_core.Sequential() - model.add(keras_core.layers.Dense(8)) - model.add(keras_core.layers.Dense(4)) + model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(4)) # model.weights not created yet # Whereas if you specify an `Input`, the model gets built # continuously as you are adding layers: - model = keras_core.Sequential() - model.add(keras_core.Input(shape=(16,))) - model.add(keras_core.layers.Dense(8)) + model = keras.Sequential() + model.add(keras.Input(shape=(16,))) + model.add(keras.layers.Dense(8)) len(model.weights) # Returns "2" # When using the delayed-build pattern (no input shape specified), you can # choose to manually build your model by calling # `build(batch_input_shape)`: - model = keras_core.Sequential() - model.add(keras_core.layers.Dense(8)) - model.add(keras_core.layers.Dense(4)) + model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(4)) model.build((None, 16)) len(model.weights) # Returns "4" # Note that when using the delayed-build pattern (no input shape specified), # the model gets built the first time you call `fit`, `eval`, or `predict`, # or the first time you call the model on some input data. - model = keras_core.Sequential() - model.add(keras_core.layers.Dense(8)) - model.add(keras_core.layers.Dense(1)) + model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(1)) model.compile(optimizer='sgd', loss='mse') # This builds the model for the first time: model.fit(x, y, batch_size=32, epochs=10) @@ -89,7 +89,7 @@ class Sequential(Model): layer = origin_layer if not isinstance(layer, Layer): raise ValueError( - "Only instances of `keras_core.Layer` can be " + "Only instances of `keras.Layer` can be " f"added to a Sequential model. 
Received: {layer} " f"(of type {type(layer)})" ) diff --git a/keras_core/models/sequential_test.py b/keras/models/sequential_test.py similarity index 96% rename from keras_core/models/sequential_test.py rename to keras/models/sequential_test.py index c4f2d4633..eed805ea2 100644 --- a/keras_core/models/sequential_test.py +++ b/keras/models/sequential_test.py @@ -1,12 +1,12 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import testing -from keras_core.layers.core.input_layer import Input -from keras_core.models.functional import Functional -from keras_core.models.sequential import Sequential +from keras import backend +from keras import layers +from keras import testing +from keras.layers.core.input_layer import Input +from keras.models.functional import Functional +from keras.models.sequential import Sequential @pytest.mark.requires_trainable_backend diff --git a/keras_core/models/variable_mapping.py b/keras/models/variable_mapping.py similarity index 91% rename from keras_core/models/variable_mapping.py rename to keras/models/variable_mapping.py index e443c9536..ec04015a7 100644 --- a/keras_core/models/variable_mapping.py +++ b/keras/models/variable_mapping.py @@ -1,7 +1,7 @@ -from keras_core.layers.layer import Layer -from keras_core.metrics.metric import Metric -from keras_core.optimizers.optimizer import Optimizer -from keras_core.saving import saving_lib +from keras.layers.layer import Layer +from keras.metrics.metric import Metric +from keras.optimizers.optimizer import Optimizer +from keras.saving import saving_lib def map_trackable_variables(trackable, store, visited_trackables): diff --git a/keras_core/models/variable_mapping_test.py b/keras/models/variable_mapping_test.py similarity index 94% rename from keras_core/models/variable_mapping_test.py rename to keras/models/variable_mapping_test.py index e8267e009..6eadce71b 100644 --- a/keras_core/models/variable_mapping_test.py +++ 
b/keras/models/variable_mapping_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.saving import saving_lib_test +from keras import testing +from keras.saving import saving_lib_test class VariableMappingTest(testing.TestCase): diff --git a/keras/ops/__init__.py b/keras/ops/__init__.py new file mode 100644 index 000000000..2e405791a --- /dev/null +++ b/keras/ops/__init__.py @@ -0,0 +1,15 @@ +# from keras.ops.numpy import Matmul, matmul +# from keras.ops.numpy import Add, add +# from keras.ops.numpy import Multiply, multiply + +from keras.backend import cast +from keras.backend import cond +from keras.backend import is_tensor +from keras.backend import name_scope +from keras.backend import random +from keras.ops import image +from keras.ops import operation_utils +from keras.ops.core import * # noqa: F403 +from keras.ops.math import * # noqa: F403 +from keras.ops.nn import * # noqa: F403 +from keras.ops.numpy import * # noqa: F403 diff --git a/keras_core/ops/core.py b/keras/ops/core.py similarity index 90% rename from keras_core/ops/core.py rename to keras/ops/core.py index d60acf4e9..2ba549d4a 100644 --- a/keras_core/ops/core.py +++ b/keras/ops/core.py @@ -14,12 +14,12 @@ cond import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend import any_symbolic_tensors -from keras_core.ops.operation import Operation -from keras_core.utils import traceback_utils +from keras import backend +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend import any_symbolic_tensors +from keras.ops.operation import Operation +from keras.utils import traceback_utils class Scatter(Operation): @@ -30,7 +30,7 @@ class Scatter(Operation): return KerasTensor(shape, dtype=values.dtype) -@keras_core_export("keras_core.ops.scatter") +@keras_export("keras.ops.scatter") def scatter(indices, values, 
shape): """Returns a tensor of shape `shape` where `indices` are set to `values`. @@ -38,8 +38,8 @@ def scatter(indices, values, shape): returns the output. It is equivalent to: ```python - zeros = keras_core.ops.zeros(shape) - output = keras_core.ops.scatter_update(zeros, indices, values) + zeros = keras.ops.zeros(shape) + output = keras.ops.scatter_update(zeros, indices, values) ``` Args: @@ -52,7 +52,7 @@ def scatter(indices, values, shape): >>> indices = [[0, 1], [1, 1]] >>> values = np.array([1., 1.]) - >>> keras_core.ops.scatter(indices, values, shape=(2, 2)) + >>> keras.ops.scatter(indices, values, shape=(2, 2)) array([[0., 1.], [0., 1.]]) """ @@ -69,7 +69,7 @@ class ScatterUpdate(Operation): return KerasTensor(inputs.shape, dtype=inputs.dtype) -@keras_core_export("keras_core.ops.scatter_update") +@keras_export("keras.ops.scatter_update") def scatter_update(inputs, indices, updates): """Update inputs via updates at scattered (sparse) indices. @@ -87,7 +87,7 @@ def scatter_update(inputs, indices, updates): inputs = np.zeros((4, 4, 4)) indices = [[1, 2, 3], [0, 1, 3]] updates = np.array([1., 1.]) - inputs = keras_core.ops.scatter_update(inputs, indices, updates) + inputs = keras.ops.scatter_update(inputs, indices, updates) ``` 2 `indices` is a 2D tensor of shape `(num_updates, k)`, where `num_updates` @@ -103,7 +103,7 @@ def scatter_update(inputs, indices, updates): inputs = np.zeros((4, 4, 4)) indices = [[1, 2], [2, 3]] updates = np.array([[1., 1., 1, 1,], [1., 1., 1, 1,]) - inputs = keras_core.ops.scatter_update(inputs, indices, updates) + inputs = keras.ops.scatter_update(inputs, indices, updates) ``` Args: @@ -129,7 +129,7 @@ class Slice(Operation): return KerasTensor(shape, dtype=inputs.dtype) -@keras_core_export("keras_core.ops.slice") +@keras_export("keras.ops.slice") def slice(inputs, start_indices, shape): """Return a slice of an input tensor. 
@@ -143,7 +143,7 @@ def slice(inputs, start_indices, shape): inputs = np.zeros((5, 5)) start_indices = np.array([3, 3]) shape = np.array([2, 2]) - inputs = keras_core.ops.slice(inputs, start_indices, updates) + inputs = keras.ops.slice(inputs, start_indices, updates) ``` Args: @@ -168,7 +168,7 @@ class SliceUpdate(Operation): return KerasTensor(inputs.shape, dtype=inputs.dtype) -@keras_core_export("keras_core.ops.slice_update") +@keras_export("keras.ops.slice_update") def slice_update(inputs, start_indices, updates): """Update an input by slicing in a tensor of updated values. @@ -186,7 +186,7 @@ def slice_update(inputs, start_indices, updates): inputs = np.zeros((5, 5)) start_indices = [3, 3] updates = np.ones((2, 2)) - inputs = keras_core.ops.slice_update(inputs, start_indices, updates) + inputs = keras.ops.slice_update(inputs, start_indices, updates) ``` Args: @@ -223,7 +223,7 @@ class WhileLoop(Operation): return [KerasTensor(v.shape, dtype=v.dtype) for v in loop_vars] -@keras_core_export("keras_core.ops.while_loop") +@keras_export("keras.ops.while_loop") def while_loop( cond, body, @@ -252,7 +252,7 @@ def while_loop( >>> i = 0 >>> cond = lambda i: i < 10 >>> body = lambda i: i + 1 - >>> keras_core.ops.while_loop(cond, body, [i])[0] + >>> keras.ops.while_loop(cond, body, [i])[0] 10 """ return backend.core.while_loop( @@ -274,7 +274,7 @@ class StopGradient(Operation): return KerasTensor(variable.shape, dtype=variable.dtype) -@keras_core_export("keras_core.ops.stop_gradient") +@keras_export("keras.ops.stop_gradient") def stop_gradient(variable): """Stops gradient computation. @@ -287,11 +287,11 @@ def stop_gradient(variable): Examples: - >>> var = keras_core.backend.convert_to_tensor( + >>> var = keras.backend.convert_to_tensor( ... [1., 2., 3.], ... dtype="float32" ... 
) - >>> var = keras_core.ops.stop_gradient(var) + >>> var = keras.ops.stop_gradient(var) """ return backend.core.stop_gradient(variable) @@ -315,7 +315,7 @@ class ForiLoop(Operation): return KerasTensor(init_val.shape, dtype=init_val.dtype) -@keras_core_export("keras_core.ops.fori_loop") +@keras_export("keras.ops.fori_loop") def fori_loop(lower, upper, body_fun, init_val): """For loop implementation. @@ -336,7 +336,7 @@ def fori_loop(lower, upper, body_fun, init_val): >>> upper = 10 >>> body_fun = lambda i, s: (i + 1, s + i) >>> init_val = 0 - >>> keras_core.ops.fori_loop(lower, upper, body_fun, init_val) + >>> keras.ops.fori_loop(lower, upper, body_fun, init_val) 45 """ if any_symbolic_tensors((lower, upper, init_val)): @@ -374,7 +374,7 @@ class Unstack(Operation): return output -@keras_core_export("keras_core.ops.unstack") +@keras_export("keras.ops.unstack") def unstack(x, num=None, axis=0): """Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors. @@ -389,8 +389,8 @@ def unstack(x, num=None, axis=0): Example: - >>> x = keras_core.ops.array([[1, 2], [3, 4]]) - >>> keras_core.ops.unstack(x, axis=0) + >>> x = keras.ops.array([[1, 2], [3, 4]]) + >>> keras.ops.unstack(x, axis=0) [array([1, 2]), array([3, 4])] """ if any_symbolic_tensors((x,)): @@ -398,7 +398,7 @@ def unstack(x, num=None, axis=0): return backend.core.unstack(x, num=num, axis=axis) -@keras_core_export("keras_core.ops.shape") +@keras_export("keras.ops.shape") def shape(x): """Gets the shape of the tensor input. @@ -416,8 +416,8 @@ def shape(x): Example: - >>> x = keras_core.zeros((8, 12)) - >>> keras_core.ops.shape(x) + >>> x = keras.zeros((8, 12)) + >>> keras.ops.shape(x) (8, 12) """ if any_symbolic_tensors((x,)): @@ -437,7 +437,7 @@ class Cast(Operation): return backend.KerasTensor(shape=x.shape, dtype=self.dtype) -@keras_core_export("keras_core.ops.cast") +@keras_export("keras.ops.cast") def cast(x, dtype): """Cast a tensor to the desired dtype. 
@@ -450,8 +450,8 @@ def cast(x, dtype): Example: - >>> x = keras_core.ops.arange(4) - >>> x = keras_core.ops.cast(x, dtype="float16") + >>> x = keras.ops.arange(4) + >>> x = keras.ops.cast(x, dtype="float16") """ dtype = backend.standardize_dtype(dtype) @@ -460,7 +460,7 @@ def cast(x, dtype): return backend.core.cast(x, dtype) -@keras_core_export("keras_core.ops.convert_to_tensor") +@keras_export("keras.ops.convert_to_tensor") def convert_to_tensor(x, dtype=None): """Convert a NumPy array to a tensor. @@ -474,12 +474,12 @@ def convert_to_tensor(x, dtype=None): Example: >>> x = np.array([1, 2, 3]) - >>> y = keras_core.ops.convert_to_tensor(x) + >>> y = keras.ops.convert_to_tensor(x) """ return backend.convert_to_tensor(x, dtype=dtype) -@keras_core_export("keras_core.ops.convert_to_numpy") +@keras_export("keras.ops.convert_to_numpy") def convert_to_numpy(x): """Convert a tensor to a NumPy array. @@ -579,7 +579,7 @@ class Cond(Operation): return True -@keras_core_export("keras_core.ops.cond") +@keras_export("keras.ops.cond") def cond(pred, true_fn, false_fn): """Conditionally applies `true_fn` or `false_fn`. 
diff --git a/keras_core/ops/core_test.py b/keras/ops/core_test.py similarity index 97% rename from keras_core/ops/core_test.py rename to keras/ops/core_test.py index 710bf3e6a..07c79aee6 100644 --- a/keras_core/ops/core_test.py +++ b/keras/ops/core_test.py @@ -1,15 +1,15 @@ import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import losses -from keras_core import models -from keras_core import ops -from keras_core import optimizers -from keras_core import testing -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.ops import core +from keras import backend +from keras import layers +from keras import losses +from keras import models +from keras import ops +from keras import optimizers +from keras import testing +from keras.backend.common.keras_tensor import KerasTensor +from keras.ops import core class CoreOpsStaticShapeTest(testing.TestCase): diff --git a/keras_core/ops/function.py b/keras/ops/function.py similarity index 96% rename from keras_core/ops/function.py rename to keras/ops/function.py index 8662b12f4..719c7963e 100644 --- a/keras_core/ops/function.py +++ b/keras/ops/function.py @@ -2,14 +2,14 @@ import collections import tree -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend.config import backend -from keras_core.ops.operation import Operation -from keras_core.utils.nest import pack_sequence_as +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend.config import backend +from keras.ops.operation import Operation +from keras.utils.nest import pack_sequence_as -@keras_core_export("keras_core.Function") +@keras_export("keras.Function") class Function(Operation): """Class that encapsulates a computation graph of Keras operations. 
@@ -24,11 +24,11 @@ class Function(Operation): Example: ```python - input_1 = keras_core.KerasTensor(shape=(None, 2, 3)) - input_2 = keras_core.KerasTensor(shape=(None, 2, 3)) + input_1 = keras.KerasTensor(shape=(None, 2, 3)) + input_2 = keras.KerasTensor(shape=(None, 2, 3)) x = input_1 + input_2 - output = keras_core.ops.sigmoid(x) - fn = keras_core.Function(inputs=[input_1, input_2], outputs=output) + output = keras.ops.sigmoid(x) + fn = keras.Function(inputs=[input_1, input_2], outputs=output) input_1_val = np.random.random((4, 2, 3)) input_2_val = np.random.random((4, 2, 3)) @@ -49,7 +49,7 @@ class Function(Operation): if backend() == "tensorflow": # Temporary work around for - # https://github.com/keras-team/keras-core/issues/931 + # https://github.com/keras-team/keras/issues/931 # This stop tensorflow from wrapping tf.function output in a # _DictWrapper object. _self_setattr_tracking = getattr( diff --git a/keras_core/ops/function_test.py b/keras/ops/function_test.py similarity index 96% rename from keras_core/ops/function_test.py rename to keras/ops/function_test.py index 3c06db1ca..33b042770 100644 --- a/keras_core/ops/function_test.py +++ b/keras/ops/function_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import testing -from keras_core.backend.common import keras_tensor -from keras_core.ops import function -from keras_core.ops import numpy as knp +from keras import testing +from keras.backend.common import keras_tensor +from keras.ops import function +from keras.ops import numpy as knp class FunctionTest(testing.TestCase): diff --git a/keras_core/ops/image.py b/keras/ops/image.py similarity index 94% rename from keras_core/ops/image.py rename to keras/ops/image.py index d3ef02362..27d79a5e3 100644 --- a/keras_core/ops/image.py +++ b/keras/ops/image.py @@ -1,9 +1,9 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend import 
any_symbolic_tensors -from keras_core.ops.operation import Operation -from keras_core.ops.operation_utils import compute_conv_output_shape +from keras import backend +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend import any_symbolic_tensors +from keras.ops.operation import Operation +from keras.ops.operation_utils import compute_conv_output_shape class Resize(Operation): @@ -52,7 +52,7 @@ class Resize(Operation): ) -@keras_core_export("keras_core.ops.image.resize") +@keras_export("keras.ops.image.resize") def resize( image, size, @@ -84,17 +84,17 @@ def resize( Examples: >>> x = np.random.random((2, 4, 4, 3)) # batch of 2 RGB images - >>> y = keras_core.ops.image.resize(x, (2, 2)) + >>> y = keras.ops.image.resize(x, (2, 2)) >>> y.shape (2, 2, 2, 3) >>> x = np.random.random((4, 4, 3)) # single RGB image - >>> y = keras_core.ops.image.resize(x, (2, 2)) + >>> y = keras.ops.image.resize(x, (2, 2)) >>> y.shape (2, 2, 3) >>> x = np.random.random((2, 3, 4, 4)) # batch of 2 RGB images - >>> y = keras_core.ops.image.resize(x, (2, 2), + >>> y = keras.ops.image.resize(x, (2, 2), ... data_format="channels_first") >>> y.shape (2, 3, 2, 2) @@ -156,7 +156,7 @@ class AffineTransform(Operation): return KerasTensor(image.shape, dtype=image.dtype) -@keras_core_export("keras_core.ops.image.affine_transform") +@keras_export("keras.ops.image.affine_transform") def affine_transform( image, transform, @@ -218,13 +218,13 @@ def affine_transform( ... [1, 0, -20, 0, 1, -16, 0, 0], # translation ... ] ... 
) - >>> y = keras_core.ops.image.affine_transform(x, transform) + >>> y = keras.ops.image.affine_transform(x, transform) >>> y.shape (2, 64, 80, 3) >>> x = np.random.random((64, 80, 3)) # single RGB image >>> transform = np.array([1.0, 0.5, -20, 0.5, 1.0, -16, 0, 0]) # shear - >>> y = keras_core.ops.image.affine_transform(x, transform) + >>> y = keras.ops.image.affine_transform(x, transform) >>> y.shape (64, 80, 3) @@ -235,7 +235,7 @@ def affine_transform( ... [1, 0, -20, 0, 1, -16, 0, 0], # translation ... ] ... ) - >>> y = keras_core.ops.image.affine_transform(x, transform, + >>> y = keras.ops.image.affine_transform(x, transform, ... data_format="channels_first") >>> y.shape (2, 3, 64, 80) @@ -311,7 +311,7 @@ class ExtractPatches(Operation): return KerasTensor(shape=out_shape, dtype=image.dtype) -@keras_core_export("keras_core.ops.image.extract_patches") +@keras_export("keras.ops.image.extract_patches") def extract_patches( image, size, @@ -349,11 +349,11 @@ def extract_patches( >>> image = np.random.random( ... (2, 20, 20, 3) ... 
).astype("float32") # batch of 2 RGB images - >>> patches = keras_core.ops.image.extract_patches(image, (5, 5)) + >>> patches = keras.ops.image.extract_patches(image, (5, 5)) >>> patches.shape (2, 4, 4, 75) >>> image = np.random.random((20, 20, 3)).astype("float32") # 1 RGB image - >>> patches = keras_core.ops.image.extract_patches(image, (3, 3), (1, 1)) + >>> patches = keras.ops.image.extract_patches(image, (3, 3), (1, 1)) >>> patches.shape (18, 18, 27) """ @@ -448,7 +448,7 @@ class MapCoordinates(Operation): return KerasTensor(coordinates.shape[1:], dtype=image.dtype) -@keras_core_export("keras_core.ops.image.map_coordinates") +@keras_export("keras.ops.image.map_coordinates") def map_coordinates( input, coordinates, order, fill_mode="constant", fill_value=0 ): diff --git a/keras_core/ops/image_test.py b/keras/ops/image_test.py similarity index 98% rename from keras_core/ops/image_test.py rename to keras/ops/image_test.py index 6afe3f344..839d56033 100644 --- a/keras_core/ops/image_test.py +++ b/keras/ops/image_test.py @@ -6,10 +6,10 @@ import scipy.ndimage import tensorflow as tf from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.ops import image as kimage +from keras import backend +from keras import testing +from keras.backend.common.keras_tensor import KerasTensor +from keras.ops import image as kimage class ImageOpsDynamicShapeTest(testing.TestCase): diff --git a/keras_core/ops/math.py b/keras/ops/math.py similarity index 92% rename from keras_core/ops/math.py rename to keras/ops/math.py index 413df3d33..8e508c3d0 100644 --- a/keras_core/ops/math.py +++ b/keras/ops/math.py @@ -1,11 +1,11 @@ """Commonly used math operations not included in NumPy.""" -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend import any_symbolic_tensors 
-from keras_core.ops.operation import Operation -from keras_core.ops.operation_utils import reduce_shape +from keras import backend +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend import any_symbolic_tensors +from keras.ops.operation import Operation +from keras.ops.operation_utils import reduce_shape class SegmentSum(Operation): @@ -28,7 +28,7 @@ class SegmentSum(Operation): ) -@keras_core_export("keras_core.ops.segment_sum") +@keras_export("keras.ops.segment_sum") def segment_sum(data, segment_ids, num_segments=None, sorted=False): """Computes the sum of segments in a tensor. @@ -48,10 +48,10 @@ def segment_sum(data, segment_ids, num_segments=None, sorted=False): Example: - >>> data = keras_core.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) - >>> segment_ids = keras_core.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) + >>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) + >>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) >>> num_segments = 3 - >>> keras_core.ops.segment_sum(data, segment_ids,num_segments) + >>> keras.ops.segment_sum(data, segment_ids,num_segments) array([3, 30, 300], dtype=int32) """ if any_symbolic_tensors((data,)): @@ -81,7 +81,7 @@ class SegmentMax(Operation): ) -@keras_core_export("keras_core.ops.segment_max") +@keras_export("keras.ops.segment_max") def segment_max(data, segment_ids, num_segments=None, sorted=False): """Computes the max of segments in a tensor. 
@@ -101,10 +101,10 @@ def segment_max(data, segment_ids, num_segments=None, sorted=False): Example: - >>> data = keras_core.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) - >>> segment_ids = keras_core.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) + >>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) + >>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) >>> num_segments = 3 - >>> keras_core.ops.segment_max(data, segment_ids, num_segments) + >>> keras.ops.segment_max(data, segment_ids, num_segments) array([2, 20, 200], dtype=int32) """ if any_symbolic_tensors((data,)): @@ -133,7 +133,7 @@ class TopK(Operation): return backend.math.top_k(x, self.k, self.sorted) -@keras_core_export("keras_core.ops.top_k") +@keras_export("keras.ops.top_k") def top_k(x, k, sorted=True): """Finds the top-k values and their indices in a tensor. @@ -150,7 +150,7 @@ def top_k(x, k, sorted=True): Example: - >>> x = keras_core.ops.convert_to_tensor([5, 2, 7, 1, 9, 3]) + >>> x = keras.ops.convert_to_tensor([5, 2, 7, 1, 9, 3]) >>> values, indices = top_k(x, k=3) >>> print(values) array([9 7 5], shape=(3,), dtype=int32) @@ -175,7 +175,7 @@ class InTopK(Operation): return backend.math.in_top_k(targets, predictions, self.k) -@keras_core_export("keras_core.ops.in_top_k") +@keras_export("keras.ops.in_top_k") def in_top_k(targets, predictions, k): """Checks if the targets are in the top-k predictions. @@ -190,8 +190,8 @@ def in_top_k(targets, predictions, k): Example: - >>> targets = keras_core.ops.convert_to_tensor([2, 5, 3]) - >>> predictions = keras_core.ops.convert_to_tensor( + >>> targets = keras.ops.convert_to_tensor([2, 5, 3]) + >>> predictions = keras.ops.convert_to_tensor( ... [[0.1, 0.4, 0.6, 0.9, 0.5], ... [0.1, 0.7, 0.9, 0.8, 0.3], ... 
[0.1, 0.6, 0.9, 0.9, 0.5]]) @@ -217,7 +217,7 @@ class Logsumexp(Operation): return backend.math.logsumexp(x, axis=self.axis, keepdims=self.keepdims) -@keras_core_export("keras_core.ops.logsumexp") +@keras_export("keras.ops.logsumexp") def logsumexp(x, axis=None, keepdims=False): """Computes the logarithm of sum of exponentials of elements in a tensor. @@ -235,7 +235,7 @@ def logsumexp(x, axis=None, keepdims=False): Example: - >>> x = keras_core.ops.convert_to_tensor([1., 2., 3.]) + >>> x = keras.ops.convert_to_tensor([1., 2., 3.]) >>> logsumexp(x) 3.407606 """ @@ -286,7 +286,7 @@ class Qr(Operation): return backend.math.qr(x, mode=self.mode) -@keras_core_export("keras_core.ops.qr") +@keras_export("keras.ops.qr") def qr(x, mode="reduced"): """Computes the QR decomposition of a tensor. @@ -303,7 +303,7 @@ def qr(x, mode="reduced"): Example: - >>> x = keras_core.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]]) + >>> x = keras.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]]) >>> q, r = qr(x) >>> print(q) array([[-0.16903079 0.897085] @@ -345,7 +345,7 @@ class ExtractSequences(Operation): ) -@keras_core_export("keras_core.ops.extract_sequences") +@keras_export("keras.ops.extract_sequences") def extract_sequences(x, sequence_length, sequence_stride): """Expands the dimension of last axis into sequences of `sequence_length`. @@ -368,7 +368,7 @@ def extract_sequences(x, sequence_length, sequence_stride): Example: - >>> x = keras_core.ops.convert_to_tensor([1, 2, 3, 4, 5, 6]) + >>> x = keras.ops.convert_to_tensor([1, 2, 3, 4, 5, 6]) >>> extract_sequences(x, 3, 2) array([[1, 2, 3], [3, 4, 5]]) @@ -422,7 +422,7 @@ class FFT(Operation): return backend.math.fft(x) -@keras_core_export("keras_core.ops.fft") +@keras_export("keras.ops.fft") def fft(x): """Computes the Fast Fourier Transform along last axis of input. @@ -437,8 +437,8 @@ def fft(x): Example: >>> x = ( - ... keras_core.ops.convert_to_tensor([1., 2.]), - ... keras_core.ops.convert_to_tensor([0., 1.]), + ... 
keras.ops.convert_to_tensor([1., 2.]), + ... keras.ops.convert_to_tensor([0., 1.]), ... ) >>> fft(x) (array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32)) @@ -490,7 +490,7 @@ class FFT2(Operation): return backend.math.fft2(x) -@keras_core_export("keras_core.ops.fft2") +@keras_export("keras.ops.fft2") def fft2(x): """Computes the 2D Fast Fourier Transform along the last two axes of input. @@ -505,8 +505,8 @@ def fft2(x): Example: >>> x = ( - ... keras_core.ops.convert_to_tensor([[1., 2.], [2., 1.]]), - ... keras_core.ops.convert_to_tensor([[0., 1.], [1., 0.]]), + ... keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]), + ... keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]), ... ) >>> fft2(x) (array([[ 6., 0.], @@ -549,7 +549,7 @@ class RFFT(Operation): return backend.math.rfft(x, fft_length=self.fft_length) -@keras_core_export("keras_core.ops.rfft") +@keras_export("keras.ops.rfft") def rfft(x, fft_length=None): """Real-valued Fast Fourier Transform along the last axis of the input. @@ -577,7 +577,7 @@ def rfft(x, fft_length=None): Examples: - >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) + >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> rfft(x) (array([10.0, -2.5, -2.5]), array([0.0, 3.4409548, 0.81229924])) @@ -630,7 +630,7 @@ class IRFFT(Operation): return backend.math.irfft(x, fft_length=self.fft_length) -@keras_core_export("keras_core.ops.irfft") +@keras_export("keras.ops.irfft") def irfft(x, fft_length=None): """Inverse real-valued Fast Fourier transform along the last axis. 
@@ -661,8 +661,8 @@ def irfft(x, fft_length=None): Examples: - >>> real = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) - >>> imag = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) + >>> real = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) + >>> imag = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> irfft((real, imag)) array([0.66666667, -0.9106836, 0.24401694]) @@ -717,7 +717,7 @@ class STFT(Operation): ) -@keras_core_export("keras_core.ops.stft") +@keras_export("keras.ops.stft") def stft( x, sequence_length, sequence_stride, fft_length, window="hann", center=True ): @@ -748,7 +748,7 @@ def stft( Example: - >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) + >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> stft(x, 3, 2, 3) (array([[0.75, -0.375], [3.75, -1.875], @@ -837,7 +837,7 @@ class ISTFT(Operation): ) -@keras_core_export("keras_core.ops.istft") +@keras_export("keras.ops.istft") def istft( x, sequence_length, @@ -876,7 +876,7 @@ def istft( Example: - >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) + >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> istft(stft(x, 1, 1, 1), 1, 1, 1) array([0.0, 1.0, 2.0, 3.0, 4.0]) """ @@ -910,8 +910,8 @@ class Rsqrt(Operation): Example: - >>> data = keras_core.ops.convert_to_tensor([1.0, 10.0, 100.0]) - >>> keras_core.ops.rsqrt(data) + >>> data = keras.ops.convert_to_tensor([1.0, 10.0, 100.0]) + >>> keras.ops.rsqrt(data) array([1.0, 0.31622776, 0.1], dtype=float32) """ @@ -923,7 +923,7 @@ class Rsqrt(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export("keras_core.ops.rsqrt") +@keras_export("keras.ops.rsqrt") def rsqrt(x): if any_symbolic_tensors((x,)): return Rsqrt().symbolic_call(x) diff --git a/keras_core/ops/math_test.py b/keras/ops/math_test.py similarity index 99% rename from keras_core/ops/math_test.py rename to keras/ops/math_test.py index 9fdac5106..2f0c47f0f 100644 --- 
a/keras_core/ops/math_test.py +++ b/keras/ops/math_test.py @@ -5,10 +5,10 @@ import pytest import scipy.signal from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.ops import math as kmath +from keras import backend +from keras import testing +from keras.backend.common.keras_tensor import KerasTensor +from keras.ops import math as kmath def _stft( diff --git a/keras_core/ops/nn.py b/keras/ops/nn.py similarity index 91% rename from keras_core/ops/nn.py rename to keras/ops/nn.py index 1f8919ff1..980f22067 100644 --- a/keras_core/ops/nn.py +++ b/keras/ops/nn.py @@ -1,16 +1,16 @@ """Commonly-used neural network operations not included in NumPy.""" -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend import any_symbolic_tensors -from keras_core.backend import standardize_data_format -from keras_core.backend.common.backend_utils import ( +from keras import backend +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend import any_symbolic_tensors +from keras.backend import standardize_data_format +from keras.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras_core.ops import operation_utils -from keras_core.ops.operation import Operation -from keras_core.ops.operation_utils import reduce_shape +from keras.ops import operation_utils +from keras.ops.operation import Operation +from keras.ops.operation_utils import reduce_shape class Relu(Operation): @@ -21,7 +21,7 @@ class Relu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.relu", "keras_core.ops.nn.relu"]) +@keras_export(["keras.ops.relu", "keras.ops.nn.relu"]) def relu(x): """Rectified linear unit activation function. 
@@ -35,8 +35,8 @@ def relu(x): Example: - >>> x1 = keras_core.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2]) - >>> keras_core.ops.relu(x1) + >>> x1 = keras.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2]) + >>> keras.ops.relu(x1) array([0.0, 0.0, 1.0, 0.2], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -52,7 +52,7 @@ class Relu6(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.relu6", "keras_core.ops.nn.relu6"]) +@keras_export(["keras.ops.relu6", "keras.ops.nn.relu6"]) def relu6(x): """Rectified linear unit activation function with upper bound of 6. @@ -66,8 +66,8 @@ def relu6(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0]) - >>> keras_core.ops.relu6(x) + >>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0]) + >>> keras.ops.relu6(x) array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -83,7 +83,7 @@ class Sigmoid(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.sigmoid", "keras_core.ops.nn.sigmoid"]) +@keras_export(["keras.ops.sigmoid", "keras.ops.nn.sigmoid"]) def sigmoid(x): """Sigmoid activation function. @@ -97,8 +97,8 @@ def sigmoid(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0]) - >>> keras_core.ops.sigmoid(x) + >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0]) + >>> keras.ops.sigmoid(x) array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32) """ @@ -115,7 +115,7 @@ class Softplus(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.softplus", "keras_core.ops.nn.softplus"]) +@keras_export(["keras.ops.softplus", "keras.ops.nn.softplus"]) def softplus(x): """Softplus activation function. 
@@ -130,8 +130,8 @@ def softplus(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-0.555, 0.0, 0.555]) - >>> keras_core.ops.softplus(x) + >>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555]) + >>> keras.ops.softplus(x) array([0.45366603, 0.6931472, 1.008666], dtype=float32) """ @@ -148,7 +148,7 @@ class Softsign(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.softsign", "keras_core.ops.nn.softsign"]) +@keras_export(["keras.ops.softsign", "keras.ops.nn.softsign"]) def softsign(x): """Softsign activation function. @@ -162,8 +162,8 @@ def softsign(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0]) - >>> keras_core.ops.softsign(x) + >>> x = keras.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0]) + >>> keras.ops.softsign(x) Array([-0.09090909, -0.90909094, 0.5, 0.0, 0.990099], dtype=float32) """ @@ -180,12 +180,12 @@ class Silu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.silu", - "keras_core.ops.nn.silu", - "keras_core.ops.swish", - "keras_core.ops.nn.swish", + "keras.ops.silu", + "keras.ops.nn.silu", + "keras.ops.swish", + "keras.ops.nn.swish", ] ) def silu(x): @@ -202,10 +202,10 @@ def silu(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0]) - >>> keras_core.ops.sigmoid(x) + >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0]) + >>> keras.ops.sigmoid(x) array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32) - >>> keras_core.ops.silu(x) + >>> keras.ops.silu(x) array([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32) """ @@ -222,10 +222,10 @@ class LogSigmoid(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.log_sigmoid", - "keras_core.ops.nn.log_sigmoid", + "keras.ops.log_sigmoid", + "keras.ops.nn.log_sigmoid", ] ) def log_sigmoid(x): @@ -241,8 +241,8 @@ def 
log_sigmoid(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0]) - >>> keras_core.ops.log_sigmoid(x) + >>> x = keras.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0]) + >>> keras.ops.log_sigmoid(x) array([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32) """ @@ -263,8 +263,8 @@ class LeakyRelu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( - ["keras_core.ops.leaky_relu", "keras_core.ops.nn.leaky_relu"] +@keras_export( + ["keras.ops.leaky_relu", "keras.ops.nn.leaky_relu"] ) def leaky_relu(x, negative_slope=0.2): """Leaky version of a Rectified Linear Unit activation function. @@ -284,7 +284,7 @@ def leaky_relu(x, negative_slope=0.2): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_leaky_relu = keras_core.ops.leaky_relu(x) + >>> x_leaky_relu = keras.ops.leaky_relu(x) >>> print(x_leaky_relu) array([-0.2, 0. , 1. ], shape=(3,), dtype=float64) @@ -302,10 +302,10 @@ class HardSigmoid(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.hard_sigmoid", - "keras_core.ops.nn.hard_sigmoid", + "keras.ops.hard_sigmoid", + "keras.ops.nn.hard_sigmoid", ] ) def hard_sigmoid(x): @@ -324,7 +324,7 @@ def hard_sigmoid(x): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_hard_sigmoid = keras_core.ops.hard_sigmoid(x) + >>> x_hard_sigmoid = keras.ops.hard_sigmoid(x) >>> print(x_hard_sigmoid) array([0.3, 0.5, 0.7], shape=(3,), dtype=float64) @@ -346,7 +346,7 @@ class Elu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.elu", "keras_core.ops.nn.elu"]) +@keras_export(["keras.ops.elu", "keras.ops.nn.elu"]) def elu(x, alpha=1.0): """Exponential Linear Unit activation function. 
@@ -364,7 +364,7 @@ def elu(x, alpha=1.0): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_elu = keras_core.ops.elu(x) + >>> x_elu = keras.ops.elu(x) >>> print(x_elu) array([-0.63212055, 0., 1.], shape=(3,), dtype=float64) @@ -382,7 +382,7 @@ class Selu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.selu", "keras_core.ops.nn.selu"]) +@keras_export(["keras.ops.selu", "keras.ops.nn.selu"]) def selu(x): """Scaled Exponential Linear Unit (SELU) activation function. @@ -400,7 +400,7 @@ def selu(x): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_selu = keras_core.ops.selu(x) + >>> x_selu = keras.ops.selu(x) >>> print(x_selu) array([-1.11133055, 0., 1.05070098], shape=(3,), dtype=float64) @@ -422,7 +422,7 @@ class Gelu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.gelu", "keras_core.ops.nn.gelu"]) +@keras_export(["keras.ops.gelu", "keras.ops.nn.gelu"]) def gelu(x, approximate=True): """Gaussian Error Linear Unit (GELU) activation function. @@ -443,7 +443,7 @@ def gelu(x, approximate=True): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_gelu = keras_core.ops.gelu(x) + >>> x_gelu = keras.ops.gelu(x) >>> print(x_gelu) array([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64) @@ -465,7 +465,7 @@ class Softmax(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.softmax", "keras_core.ops.nn.softmax"]) +@keras_export(["keras.ops.softmax", "keras.ops.nn.softmax"]) def softmax(x, axis=-1): """Softmax activation function. 
@@ -488,7 +488,7 @@ def softmax(x, axis=-1): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_softmax = keras_core.ops.softmax(x) + >>> x_softmax = keras.ops.softmax(x) >>> print(x_softmax) array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64) @@ -530,10 +530,10 @@ class LogSoftmax(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.log_softmax", - "keras_core.ops.nn.log_softmax", + "keras.ops.log_softmax", + "keras.ops.nn.log_softmax", ] ) def log_softmax(x, axis=-1): @@ -553,7 +553,7 @@ def log_softmax(x, axis=-1): Example: >>> x = np.array([-1., 0., 1.]) - >>> x_log_softmax = keras_core.ops.log_softmax(x) + >>> x_log_softmax = keras.ops.log_softmax(x) >>> print(x_log_softmax) array([-2.40760596, -1.40760596, -0.40760596], shape=(3,), dtype=float64) @@ -617,7 +617,7 @@ class MaxPool(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export(["keras_core.ops.max_pool", "keras_core.ops.nn.max_pool"]) +@keras_export(["keras.ops.max_pool", "keras.ops.nn.max_pool"]) def max_pool( inputs, pool_size, @@ -702,10 +702,10 @@ class AveragePool(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.average_pool", - "keras_core.ops.nn.average_pool", + "keras.ops.average_pool", + "keras.ops.nn.average_pool", ] ) def average_pool( @@ -797,7 +797,7 @@ class Conv(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export(["keras_core.ops.conv", "keras_core.ops.nn.conv"]) +@keras_export(["keras.ops.conv", "keras.ops.nn.conv"]) def conv( inputs, kernel, @@ -889,10 +889,10 @@ class DepthwiseConv(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.depthwise_conv", - "keras_core.ops.nn.depthwise_conv", + "keras.ops.depthwise_conv", + "keras.ops.nn.depthwise_conv", ] ) def depthwise_conv( @@ -997,10 +997,10 @@ class 
SeparableConv(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.separable_conv", - "keras_core.ops.nn.separable_conv", + "keras.ops.separable_conv", + "keras.ops.nn.separable_conv", ] ) def separable_conv( @@ -1118,10 +1118,10 @@ class ConvTranspose(Operation): return KerasTensor(output_shape, dtype=inputs.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.conv_transpose", - "keras_core.ops.nn.conv_transpose", + "keras.ops.conv_transpose", + "keras.ops.nn.conv_transpose", ] ) def conv_transpose( @@ -1218,7 +1218,7 @@ class OneHot(Operation): return KerasTensor(x_shape, dtype=self.dtype) -@keras_core_export(["keras_core.ops.one_hot", "keras_core.ops.nn.one_hot"]) +@keras_export(["keras.ops.one_hot", "keras.ops.nn.one_hot"]) def one_hot(x, num_classes, axis=-1, dtype=None): """Converts integer tensor `x` into a one-hot tensor. @@ -1244,7 +1244,7 @@ def one_hot(x, num_classes, axis=-1, dtype=None): Example: - >>> x = keras_core.ops.convert_to_tensor([1, 3, 2, 0]) + >>> x = keras.ops.convert_to_tensor([1, 3, 2, 0]) >>> one_hot(x, num_classes=4) array([[0. 1. 0. 0.] [0. 0. 0. 1.] 
@@ -1278,10 +1278,10 @@ class BinaryCrossentropy(Operation): return KerasTensor(output.shape, dtype=output.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.binary_crossentropy", - "keras_core.ops.nn.binary_crossentropy", + "keras.ops.binary_crossentropy", + "keras.ops.nn.binary_crossentropy", ] ) def binary_crossentropy(target, output, from_logits=False): @@ -1310,8 +1310,8 @@ def binary_crossentropy(target, output, from_logits=False): Example: - >>> target = keras_core.ops.convert_to_tensor([0, 1, 1, 0]) - >>> output = keras_core.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2]) + >>> target = keras.ops.convert_to_tensor([0, 1, 1, 0]) + >>> output = keras.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2]) >>> binary_crossentropy(target, output) array([0.10536054 0.10536054 0.22314355 0.22314355], shape=(4,), dtype=float32) @@ -1352,10 +1352,10 @@ class CategoricalCrossentropy(Operation): return KerasTensor(output.shape[:-1], dtype=output.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.categorical_crossentropy", - "keras_core.ops.nn.categorical_crossentropy", + "keras.ops.categorical_crossentropy", + "keras.ops.nn.categorical_crossentropy", ] ) def categorical_crossentropy(target, output, from_logits=False, axis=-1): @@ -1389,11 +1389,11 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): Example: - >>> target = keras_core.ops.convert_to_tensor( + >>> target = keras.ops.convert_to_tensor( ... [[1, 0, 0], ... [0, 1, 0], ... [0, 0, 1]]) - >>> output = keras_core.ops.convert_to_tensor( + >>> output = keras.ops.convert_to_tensor( ... [[0.9, 0.05, 0.05], ... [0.1, 0.8, 0.1], ... 
[0.2, 0.3, 0.5]]) @@ -1439,10 +1439,10 @@ class SparseCategoricalCrossentropy(Operation): return KerasTensor(output.shape[:-1], dtype=output.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.sparse_categorical_crossentropy", - "keras_core.ops.nn.sparse_categorical_crossentropy", + "keras.ops.sparse_categorical_crossentropy", + "keras.ops.nn.sparse_categorical_crossentropy", ] ) def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): @@ -1477,8 +1477,8 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): Example: - >>> target = keras_core.ops.convert_to_tensor([0, 1, 2], dtype=int32) - >>> output = keras_core.ops.convert_to_tensor( + >>> target = keras.ops.convert_to_tensor([0, 1, 2], dtype=int32) + >>> output = keras.ops.convert_to_tensor( ... [[0.9, 0.05, 0.05], ... [0.1, 0.8, 0.1], ... [0.2, 0.3, 0.5]]) @@ -1529,10 +1529,10 @@ class MultiHot(Operation): return KerasTensor(x_shape, dtype=inputs.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.multi_hot", - "keras_core.ops.nn.multi_hot", + "keras.ops.multi_hot", + "keras.ops.nn.multi_hot", ] ) def multi_hot(inputs, num_tokens, axis=-1, dtype=None): @@ -1554,8 +1554,8 @@ def multi_hot(inputs, num_tokens, axis=-1, dtype=None): Example: - >>> data = keras_core.ops.convert_to_tensor([0, 4]) - >>> keras_core.ops.multi_hot(data, num_tokens=5) + >>> data = keras.ops.convert_to_tensor([0, 4]) + >>> keras.ops.multi_hot(data, num_tokens=5) array([1.0, 0.0, 0.0, 0.0, 1.0], dtype=float32) """ @@ -1587,10 +1587,10 @@ class Moments(Operation): ) -@keras_core_export( +@keras_export( [ - "keras_core.ops.moments", - "keras_core.ops.nn.moments", + "keras.ops.moments", + "keras.ops.nn.moments", ] ) def moments(x, axes, keepdims=False): @@ -1611,8 +1611,8 @@ def moments(x, axes, keepdims=False): Example: - >>> x = keras_core.ops.convert_to_tensor([0, 1, 2, 3, 100], dtype="float32") - >>> keras_core.ops.moments(x, axes=[0]) + >>> x = 
keras.ops.convert_to_tensor([0, 1, 2, 3, 100], dtype="float32") + >>> keras.ops.moments(x, axes=[0]) (array(21.2, dtype=float32), array(1553.3601, dtype=float32)) """ diff --git a/keras_core/ops/nn_test.py b/keras/ops/nn_test.py similarity index 99% rename from keras_core/ops/nn_test.py rename to keras/ops/nn_test.py index 7f27aa3cc..67c438df8 100644 --- a/keras_core/ops/nn_test.py +++ b/keras/ops/nn_test.py @@ -3,10 +3,10 @@ import pytest import tensorflow as tf from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.ops import nn as knn +from keras import backend +from keras import testing +from keras.backend.common.keras_tensor import KerasTensor +from keras.ops import nn as knn class NNOpsDynamicShapeTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/ops/node.py b/keras/ops/node.py similarity index 98% rename from keras_core/ops/node.py rename to keras/ops/node.py index 5d26feaf2..d7d09f8d2 100644 --- a/keras_core/ops/node.py +++ b/keras/ops/node.py @@ -2,8 +2,8 @@ import collections import tree -from keras_core.backend import KerasTensor -from keras_core.ops.symbolic_arguments import SymbolicArguments +from keras.backend import KerasTensor +from keras.ops.symbolic_arguments import SymbolicArguments class Node: diff --git a/keras_core/ops/node_test.py b/keras/ops/node_test.py similarity index 93% rename from keras_core/ops/node_test.py rename to keras/ops/node_test.py index ea803c291..b00781f5f 100644 --- a/keras_core/ops/node_test.py +++ b/keras/ops/node_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import Layer -from keras_core import testing -from keras_core.backend import KerasTensor -from keras_core.ops.node import Node +from keras import Layer +from keras import testing +from keras.backend import KerasTensor +from keras.ops.node import Node class DummyLayer(Layer): diff --git 
a/keras_core/ops/numpy.py b/keras/ops/numpy.py similarity index 90% rename from keras_core/ops/numpy.py rename to keras/ops/numpy.py index a11b75f38..e00ff7a11 100644 --- a/keras_core/ops/numpy.py +++ b/keras/ops/numpy.py @@ -143,13 +143,13 @@ import re import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend import KerasTensor -from keras_core.backend import any_symbolic_tensors -from keras_core.ops import operation_utils -from keras_core.ops.operation import Operation -from keras_core.ops.operation_utils import reduce_shape +from keras import backend +from keras.api_export import keras_export +from keras.backend import KerasTensor +from keras.backend import any_symbolic_tensors +from keras.ops import operation_utils +from keras.ops.operation import Operation +from keras.ops.operation_utils import reduce_shape def broadcast_shapes(shape1, shape2): @@ -262,11 +262,11 @@ class Absolute(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.absolute", "keras_core.ops.numpy.absolute"]) +@keras_export(["keras.ops.absolute", "keras.ops.numpy.absolute"]) def absolute(x): """Compute the absolute value element-wise. - `keras_core.ops.abs` is a shorthand for this function. + `keras.ops.abs` is a shorthand for this function. Args: x: Input tensor. 
@@ -276,8 +276,8 @@ def absolute(x): Example: - >>> x = keras_core.ops.convert_to_tensor([-1.2, 1.2]) - >>> keras_core.ops.absolute(x) + >>> x = keras.ops.convert_to_tensor([-1.2, 1.2]) + >>> keras.ops.absolute(x) array([1.2, 1.2], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -289,9 +289,9 @@ class Abs(Absolute): pass -@keras_core_export(["keras_core.ops.abs", "keras_core.ops.numpy.abs"]) +@keras_export(["keras.ops.abs", "keras.ops.numpy.abs"]) def abs(x): - """Shorthand for `keras_core.ops.absolute`.""" + """Shorthand for `keras.ops.absolute`.""" return absolute(x) @@ -309,7 +309,7 @@ class Add(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.add", "keras_core.ops.numpy.add"]) +@keras_export(["keras.ops.add", "keras.ops.numpy.add"]) def add(x1, x2): """Add arguments element-wise. @@ -321,18 +321,18 @@ def add(x1, x2): The tensor containing the element-wise sum of `x1` and `x2`. Examples: - >>> x1 = keras_core.ops.convert_to_tensor([1, 4]) - >>> x2 = keras_core.ops.convert_to_tensor([5, 6]) - >>> keras_core.ops.add(x1, x2) + >>> x1 = keras.ops.convert_to_tensor([1, 4]) + >>> x2 = keras.ops.convert_to_tensor([5, 6]) + >>> keras.ops.add(x1, x2) array([6, 10], dtype=int32) - `keras_core.ops.add` also broadcasts shapes: - >>> x1 = keras_core.ops.convert_to_tensor( + `keras.ops.add` also broadcasts shapes: + >>> x1 = keras.ops.convert_to_tensor( ... [[5, 4], ... [5, 6]] ... ) - >>> x2 = keras_core.ops.convert_to_tensor([5, 6]) - >>> keras_core.ops.add(x1, x2) + >>> x2 = keras.ops.convert_to_tensor([5, 6]) + >>> keras.ops.add(x1, x2) array([[10 10] [10 12]], shape=(2, 2), dtype=int32) """ @@ -368,7 +368,7 @@ class All(Operation): ) -@keras_core_export(["keras_core.ops.all", "keras_core.ops.numpy.all"]) +@keras_export(["keras.ops.all", "keras.ops.numpy.all"]) def all(x, axis=None, keepdims=False): """Test whether all array elements along a given axis evaluate to `True`. 
@@ -387,17 +387,17 @@ def all(x, axis=None, keepdims=False): The tensor containing the logical AND reduction over the `axis`. Examples: - >>> x = keras_core.ops.convert_to_tensor([True, False]) - >>> keras_core.ops.all(x) + >>> x = keras.ops.convert_to_tensor([True, False]) + >>> keras.ops.all(x) array(False, shape=(), dtype=bool) - >>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]]) - >>> keras_core.ops.all(x, axis=0) + >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) + >>> keras.ops.all(x, axis=0) array([ True False], shape=(2,), dtype=bool) `keepdims=True` outputs a tensor with dimensions reduced to one. - >>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]]) - >>> keras_core.ops.all(x, keepdims=True) + >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) + >>> keras.ops.all(x, keepdims=True) array([[False]], shape=(1, 1), dtype=bool) """ if any_symbolic_tensors((x,)): @@ -432,7 +432,7 @@ class Any(Operation): ) -@keras_core_export(["keras_core.ops.any", "keras_core.ops.numpy.any"]) +@keras_export(["keras.ops.any", "keras.ops.numpy.any"]) def any(x, axis=None, keepdims=False): """Test whether any array element along a given axis evaluates to `True`. @@ -451,17 +451,17 @@ def any(x, axis=None, keepdims=False): The tensor containing the logical OR reduction over the `axis`. Examples: - >>> x = keras_core.ops.convert_to_tensor([True, False]) - >>> keras_core.ops.any(x) + >>> x = keras.ops.convert_to_tensor([True, False]) + >>> keras.ops.any(x) array(True, shape=(), dtype=bool) - >>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]]) - >>> keras_core.ops.any(x, axis=0) + >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) + >>> keras.ops.any(x, axis=0) array([ True True], shape=(2,), dtype=bool) `keepdims=True` outputs a tensor with dimensions reduced to one. 
- >>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]]) - >>> keras_core.ops.all(x, keepdims=True) + >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) + >>> keras.ops.all(x, keepdims=True) array([[False]], shape=(1, 1), dtype=bool) """ if any_symbolic_tensors((x,)): @@ -491,7 +491,7 @@ class Amax(Operation): ) -@keras_core_export(["keras_core.ops.amax", "keras_core.ops.numpy.amax"]) +@keras_export(["keras.ops.amax", "keras.ops.numpy.amax"]) def amax(x, axis=None, keepdims=False): """Returns the maximum of an array or maximum value along an axis. @@ -511,16 +511,16 @@ def amax(x, axis=None, keepdims=False): the specified axis. Examples: - >>> x = keras_core.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]]) - >>> keras_core.ops.amax(x) + >>> x = keras.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]]) + >>> keras.ops.amax(x) array(6, dtype=int32) - >>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]]) - >>> keras_core.ops.amax(x, axis=0) + >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]]) + >>> keras.ops.amax(x, axis=0) array([1, 6, 8], dtype=int32) - >>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]]) - >>> keras_core.ops.amax(x, axis=1, keepdims=True) + >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]]) + >>> keras.ops.amax(x, axis=1, keepdims=True) array([[8], [5]], dtype=int32) """ if any_symbolic_tensors((x,)): @@ -546,7 +546,7 @@ class Amin(Operation): ) -@keras_core_export(["keras_core.ops.amin", "keras_core.ops.numpy.amin"]) +@keras_export(["keras.ops.amin", "keras.ops.numpy.amin"]) def amin(x, axis=None, keepdims=False): """Returns the minimum of an array or minimum value along an axis. @@ -566,16 +566,16 @@ def amin(x, axis=None, keepdims=False): the specified axis. 
Examples: - >>> x = keras_core.ops.convert_to_tensor([1, 3, 5, 2, 3, 6]) - >>> keras_core.ops.amin(x) + >>> x = keras.ops.convert_to_tensor([1, 3, 5, 2, 3, 6]) + >>> keras.ops.amin(x) array(1, dtype=int32) - >>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]]) - >>> keras_core.ops.amin(x, axis=0) + >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]]) + >>> keras.ops.amin(x, axis=0) array([1,5,3], dtype=int32) - >>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]]) - >>> keras_core.ops.amin(x, axis=1, keepdims=True) + >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]]) + >>> keras.ops.amin(x, axis=1, keepdims=True) array([[1],[3]], dtype=int32) """ if any_symbolic_tensors((x,)): @@ -613,7 +613,7 @@ class Append(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.append", "keras_core.ops.numpy.append"]) +@keras_export(["keras.ops.append", "keras.ops.numpy.append"]) def append( x1, x2, @@ -631,20 +631,20 @@ def append( A tensor with the values of `x2` appended to `x1`. Examples: - >>> x1 = keras_core.ops.convert_to_tensor([1, 2, 3]) - >>> x2 = keras_core.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]]) - >>> keras_core.ops.append(x1, x2) + >>> x1 = keras.ops.convert_to_tensor([1, 2, 3]) + >>> x2 = keras.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]]) + >>> keras.ops.append(x1, x2) array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32) When `axis` is specified, `x1` and `x2` must have compatible shapes. 
- >>> x1 = keras_core.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]]) - >>> x2 = keras_core.ops.convert_to_tensor([[7, 8, 9]]) - >>> keras_core.ops.append(x1, x2, axis=0) + >>> x1 = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]]) + >>> x2 = keras.ops.convert_to_tensor([[7, 8, 9]]) + >>> keras.ops.append(x1, x2, axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=int32) - >>> x3 = keras_core.ops.convert_to_tensor([7, 8, 9]) - >>> keras_core.ops.append(x1, x3, axis=0) + >>> x3 = keras.ops.convert_to_tensor([7, 8, 9]) + >>> keras.ops.append(x1, x3, axis=0) Traceback (most recent call last): ... TypeError: Cannot concatenate arrays with different numbers of @@ -666,7 +666,7 @@ class Arange(Operation): return KerasTensor(output_shape, dtype=dtype) -@keras_core_export(["keras_core.ops.arange", "keras_core.ops.numpy.arange"]) +@keras_export(["keras.ops.arange", "keras.ops.numpy.arange"]) def arange(start, stop=None, step=1, dtype=None): """Return evenly spaced values within a given interval. @@ -700,16 +700,16 @@ def arange(start, stop=None, step=1, dtype=None): rule may result in the last element of out being greater than stop. Examples: - >>> keras_core.ops.arange(3) + >>> keras.ops.arange(3) array([0, 1, 2], dtype=int32) - >>> keras_core.ops.arange(3.0) + >>> keras.ops.arange(3.0) array([0., 1., 2.], dtype=float32) - >>> keras_core.ops.arange(3, 7) + >>> keras.ops.arange(3, 7) array([3, 4, 5, 6], dtype=int32) - >>> keras_core.ops.arange(3, 7, 2) + >>> keras.ops.arange(3, 7, 2) array([3, 5], dtype=int32) """ return backend.numpy.arange(start, stop, step=step, dtype=dtype) @@ -723,7 +723,7 @@ class Arccos(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arccos", "keras_core.ops.numpy.arccos"]) +@keras_export(["keras.ops.arccos", "keras.ops.numpy.arccos"]) def arccos(x): """Trigonometric inverse cosine, element-wise. @@ -737,8 +737,8 @@ def arccos(x): x-coordinate in radians `[0, pi]`. 
Example: - >>> x = keras_core.ops.convert_to_tensor([1, -1]) - >>> keras_core.ops.arccos(x) + >>> x = keras.ops.convert_to_tensor([1, -1]) + >>> keras.ops.arccos(x) array([0.0, 3.1415927], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -754,7 +754,7 @@ class Arccosh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arccosh", "keras_core.ops.numpy.arccosh"]) +@keras_export(["keras.ops.arccosh", "keras.ops.numpy.arccosh"]) def arccosh(x): """Inverse hyperbolic cosine, element-wise. @@ -765,8 +765,8 @@ def arccosh(x): Output tensor of same shape as x. Example: - >>> x = keras_core.ops.convert_to_tensor([10, 100]) - >>> keras_core.ops.arccosh(x) + >>> x = keras.ops.convert_to_tensor([10, 100]) + >>> keras.ops.arccosh(x) array([2.993223, 5.298292], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -782,7 +782,7 @@ class Arcsin(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arcsin", "keras_core.ops.numpy.arcsin"]) +@keras_export(["keras.ops.arcsin", "keras.ops.numpy.arcsin"]) def arcsin(x): """Inverse sine, element-wise. @@ -794,8 +794,8 @@ def arcsin(x): the closed interval `[-pi/2, pi/2]`. Example: - >>> x = keras_core.ops.convert_to_tensor([1, -1, 0]) - >>> keras_core.ops.arcsin(x) + >>> x = keras.ops.convert_to_tensor([1, -1, 0]) + >>> keras.ops.arcsin(x) array([ 1.5707964, -1.5707964, 0.], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -811,7 +811,7 @@ class Arcsinh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arcsinh", "keras_core.ops.numpy.arcsinh"]) +@keras_export(["keras.ops.arcsinh", "keras.ops.numpy.arcsinh"]) def arcsinh(x): """Inverse hyperbolic sine, element-wise. @@ -822,8 +822,8 @@ def arcsinh(x): Output tensor of same shape as `x`. 
Example: - >>> x = keras_core.ops.convert_to_tensor([1, -1, 0]) - >>> keras_core.ops.arcsinh(x) + >>> x = keras.ops.convert_to_tensor([1, -1, 0]) + >>> keras.ops.arcsinh(x) array([0.88137364, -0.88137364, 0.0], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -839,7 +839,7 @@ class Arctan(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arctan", "keras_core.ops.numpy.arctan"]) +@keras_export(["keras.ops.arctan", "keras.ops.numpy.arctan"]) def arctan(x): """Trigonometric inverse tangent, element-wise. @@ -851,8 +851,8 @@ def arctan(x): `[-pi/2, pi/2]`. Example: - >>> x = keras_core.ops.convert_to_tensor([0, 1]) - >>> keras_core.ops.arctan(x) + >>> x = keras.ops.convert_to_tensor([0, 1]) + >>> keras.ops.arctan(x) array([0., 0.7853982], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -871,7 +871,7 @@ class Arctan2(Operation): return KerasTensor(outputs_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.arctan2", "keras_core.ops.numpy.arctan2"]) +@keras_export(["keras.ops.arctan2", "keras.ops.numpy.arctan2"]) def arctan2(x1, x2): """Element-wise arc tangent of `x1/x2` choosing the quadrant correctly. @@ -892,21 +892,21 @@ def arctan2(x1, x2): Examples: Consider four points in different quadrants: - >>> x = keras_core.ops.convert_to_tensor([-1, +1, +1, -1]) - >>> y = keras_core.ops.convert_to_tensor([-1, -1, +1, +1]) - >>> keras_core.ops.arctan2(y, x) * 180 / numpy.pi + >>> x = keras.ops.convert_to_tensor([-1, +1, +1, -1]) + >>> y = keras.ops.convert_to_tensor([-1, -1, +1, +1]) + >>> keras.ops.arctan2(y, x) * 180 / numpy.pi array([-135., -45., 45., 135.], dtype=float32) Note the order of the parameters. `arctan2` is defined also when x2=0 and at several other points, obtaining values in the range `[-pi, pi]`: - >>> keras_core.ops.arctan2( - ... keras_core.ops.array([1., -1.]), - ... keras_core.ops.array([0., 0.]), + >>> keras.ops.arctan2( + ... keras.ops.array([1., -1.]), + ... keras.ops.array([0., 0.]), ... 
) array([ 1.5707964, -1.5707964], dtype=float32) - >>> keras_core.ops.arctan2( - ... keras_core.ops.array([0., 0., numpy.inf]), - ... keras_core.ops.array([+0., -0., numpy.inf]), + >>> keras.ops.arctan2( + ... keras.ops.array([0., 0., numpy.inf]), + ... keras.ops.array([+0., -0., numpy.inf]), ... ) array([0., 3.1415925, 0.7853982], dtype=float32) """ @@ -923,7 +923,7 @@ class Arctanh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.arctanh", "keras_core.ops.numpy.arctanh"]) +@keras_export(["keras.ops.arctanh", "keras.ops.numpy.arctanh"]) def arctanh(x): """Inverse hyperbolic tangent, element-wise. @@ -954,7 +954,7 @@ class Argmax(Operation): ) -@keras_core_export(["keras_core.ops.argmax", "keras_core.ops.numpy.argmax"]) +@keras_export(["keras.ops.argmax", "keras.ops.numpy.argmax"]) def argmax(x, axis=None): """Returns the indices of the maximum values along an axis. @@ -968,15 +968,15 @@ def argmax(x, axis=None): along `axis` removed. Example: - >>> x = keras_core.ops.arange(6).reshape(2, 3) + 10 + >>> x = keras.ops.arange(6).reshape(2, 3) + 10 >>> x array([[10, 11, 12], [13, 14, 15]], dtype=int32) - >>> keras_core.ops.argmax(x) + >>> keras.ops.argmax(x) array(5, dtype=int32) - >>> keras_core.ops.argmax(x, axis=0) + >>> keras.ops.argmax(x, axis=0) array([1, 1, 1], dtype=int32) - >>> keras_core.ops.argmax(x, axis=1) + >>> keras.ops.argmax(x, axis=1) array([2, 2], dtype=int32) """ if any_symbolic_tensors((x,)): @@ -1000,7 +1000,7 @@ class Argmin(Operation): ) -@keras_core_export(["keras_core.ops.argmin", "keras_core.ops.numpy.argmin"]) +@keras_export(["keras.ops.argmin", "keras.ops.numpy.argmin"]) def argmin(x, axis=None): """Returns the indices of the minium values along an axis. @@ -1014,15 +1014,15 @@ def argmin(x, axis=None): along `axis` removed. 
Example: - >>> x = keras_core.ops.arange(6).reshape(2, 3) + 10 + >>> x = keras.ops.arange(6).reshape(2, 3) + 10 >>> x array([[10, 11, 12], [13, 14, 15]], dtype=int32) - >>> keras_core.ops.argmin(x) + >>> keras.ops.argmin(x) array(0, dtype=int32) - >>> keras_core.ops.argmin(x, axis=0) + >>> keras.ops.argmin(x, axis=0) array([0, 0, 0], dtype=int32) - >>> keras_core.ops.argmin(x, axis=1) + >>> keras.ops.argmin(x, axis=1) array([0, 0], dtype=int32) """ if any_symbolic_tensors((x,)): @@ -1044,7 +1044,7 @@ class Argsort(Operation): return KerasTensor(x.shape, dtype="int32") -@keras_core_export(["keras_core.ops.argsort", "keras_core.ops.numpy.argsort"]) +@keras_export(["keras.ops.argsort", "keras.ops.numpy.argsort"]) def argsort(x, axis=-1): """Returns the indices that would sort a tensor. @@ -1058,21 +1058,21 @@ def argsort(x, axis=-1): Examples: One dimensional array: - >>> x = keras_core.ops.array([3, 1, 2]) - >>> keras_core.ops.argsort(x) + >>> x = keras.ops.array([3, 1, 2]) + >>> keras.ops.argsort(x) array([1, 2, 0], dtype=int32) Two-dimensional array: - >>> x = keras_core.ops.array([[0, 3], [3, 2], [4, 5]]) + >>> x = keras.ops.array([[0, 3], [3, 2], [4, 5]]) >>> x array([[0, 3], [3, 2], [4, 5]], dtype=int32) - >>> keras_core.ops.argsort(x, axis=0) + >>> keras.ops.argsort(x, axis=0) array([[0, 1], [1, 0], [2, 2]], dtype=int32) - >>> keras_core.ops.argsort(x, axis=1) + >>> keras.ops.argsort(x, axis=1) array([[0, 1], [1, 0], [0, 1]], dtype=int32) @@ -1090,7 +1090,7 @@ class Array(Operation): return KerasTensor(x.shape, dtype=dtype) -@keras_core_export(["keras_core.ops.array", "keras_core.ops.numpy.array"]) +@keras_export(["keras.ops.array", "keras.ops.numpy.array"]) def array(x, dtype=None): """Create a tensor. @@ -1102,10 +1102,10 @@ def array(x, dtype=None): A tensor. 
Examples: - >>> keras_core.ops.array([1, 2, 3]) + >>> keras.ops.array([1, 2, 3]) array([1, 2, 3], dtype=int32) - >>> keras_core.ops.array([1, 2, 3], dtype="float32") + >>> keras.ops.array([1, 2, 3], dtype="float32") array([1., 2., 3.], dtype=float32) """ if any_symbolic_tensors((x,)): @@ -1159,7 +1159,7 @@ class Average(Operation): ) -@keras_core_export(["keras_core.ops.average", "keras_core.ops.numpy.average"]) +@keras_export(["keras.ops.average", "keras.ops.numpy.average"]) def average(x, axis=None, weights=None): """Compute the weighted average along the specified axis. @@ -1182,31 +1182,31 @@ def average(x, axis=None, weights=None): Return the average along the specified axis. Examples: - >>> data = keras_core.ops.arange(1, 5) + >>> data = keras.ops.arange(1, 5) >>> data array([1, 2, 3, 4], dtype=int32) - >>> keras_core.ops.average(data) + >>> keras.ops.average(data) array(2.5, dtype=float32) - >>> keras_core.ops.average( - ... keras_core.ops.arange(1, 11), - ... weights=keras_core.ops.arange(10, 0, -1) + >>> keras.ops.average( + ... keras.ops.arange(1, 11), + ... weights=keras.ops.arange(10, 0, -1) ... ) array(4., dtype=float32) - >>> data = keras_core.ops.arange(6).reshape((3, 2)) + >>> data = keras.ops.arange(6).reshape((3, 2)) >>> data array([[0, 1], [2, 3], [4, 5]], dtype=int32) - >>> keras_core.ops.average( + >>> keras.ops.average( ... data, ... axis=1, - ... weights=keras_core.ops.array([1./4, 3./4]) + ... weights=keras.ops.array([1./4, 3./4]) ... ) array([0.75, 2.75, 4.75], dtype=float32) - >>> keras_core.ops.average( + >>> keras.ops.average( ... data, - ... weights=keras_core.ops.array([1./4, 3./4]) + ... weights=keras.ops.array([1./4, 3./4]) ... ) Traceback (most recent call last): ... 
@@ -1233,7 +1233,7 @@ class Bincount(Operation): return KerasTensor(out_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.bincount", "keras_core.ops.numpy.bincount"]) +@keras_export(["keras.ops.bincount", "keras.ops.numpy.bincount"]) def bincount(x, weights=None, minlength=0): """Count the number of occurrences of each value in a tensor of integers. @@ -1257,16 +1257,16 @@ def bincount(x, weights=None, minlength=0): minlength. Examples: - >>> x = keras_core.ops.array([1, 2, 2, 3], dtype="uint8") - >>> keras_core.ops.bincount(x) + >>> x = keras.ops.array([1, 2, 2, 3], dtype="uint8") + >>> keras.ops.bincount(x) array([0, 1, 2, 1], dtype=int32) >>> weights = x / 2 >>> weights array([0.5, 1., 1., 1.5], dtype=float64) - >>> keras_core.ops.bincount(x, weights=weights) + >>> keras.ops.bincount(x, weights=weights) array([0., 0.5, 2., 1.5], dtype=float64) - >>> minlength = (keras_core.ops.max(x).numpy() + 1) + 2 # 6 - >>> keras_core.ops.bincount(x, minlength=minlength) + >>> minlength = (keras.ops.max(x).numpy() + 1) + 2 # 6 + >>> keras.ops.bincount(x, minlength=minlength) array([0, 1, 2, 1, 0, 0], dtype=int32) """ if any_symbolic_tensors((x,)): @@ -1288,10 +1288,10 @@ class BroadcastTo(Operation): return KerasTensor(self.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.broadcast_to", - "keras_core.ops.numpy.broadcast_to", + "keras.ops.broadcast_to", + "keras.ops.numpy.broadcast_to", ] ) def broadcast_to(x, shape): @@ -1306,8 +1306,8 @@ def broadcast_to(x, shape): A tensor with the desired shape. 
Examples: - >>> x = keras_core.ops.array([1, 2, 3]) - >>> keras_core.ops.broadcast_to(x, (3, 3)) + >>> x = keras.ops.array([1, 2, 3]) + >>> keras.ops.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) @@ -1325,7 +1325,7 @@ class Ceil(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.ceil", "keras_core.ops.numpy.ceil"]) +@keras_export(["keras.ops.ceil", "keras.ops.numpy.ceil"]) def ceil(x): """Return the ceiling of the input, element-wise. @@ -1356,7 +1356,7 @@ class Clip(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.clip", "keras_core.ops.numpy.clip"]) +@keras_export(["keras.ops.clip", "keras.ops.numpy.clip"]) def clip(x, x_min, x_max): """Clip (limit) the values in a tensor. @@ -1411,10 +1411,10 @@ class Concatenate(Operation): return KerasTensor(output_shape, dtype=x.dtype, sparse=all_sparse) -@keras_core_export( +@keras_export( [ - "keras_core.ops.concatenate", - "keras_core.ops.numpy.concatenate", + "keras.ops.concatenate", + "keras.ops.numpy.concatenate", ] ) def concatenate(xs, axis=0): @@ -1440,8 +1440,8 @@ class Conjugate(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( - ["keras_core.ops.conjugate", "keras_core.ops.numpy.conjugate"] +@keras_export( + ["keras.ops.conjugate", "keras.ops.numpy.conjugate"] ) def conjugate(x): """Returns the complex conjugate, element-wise. @@ -1449,7 +1449,7 @@ def conjugate(x): The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. - `keras_core.ops.conj` is a shorthand for this function. + `keras.ops.conj` is a shorthand for this function. Args: x: Input tensor. 
@@ -1466,9 +1466,9 @@ class Conj(Conjugate): pass -@keras_core_export(["keras_core.ops.conj", "keras_core.ops.numpy.conj"]) +@keras_export(["keras.ops.conj", "keras.ops.numpy.conj"]) def conj(x): - """Shorthand for `keras_core.ops.conjugate`.""" + """Shorthand for `keras.ops.conjugate`.""" return conjugate(x) @@ -1480,7 +1480,7 @@ class Copy(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.copy", "keras_core.ops.numpy.copy"]) +@keras_export(["keras.ops.copy", "keras.ops.numpy.copy"]) def copy(x): """Returns a copy of `x`. @@ -1503,7 +1503,7 @@ class Cos(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.cos", "keras_core.ops.numpy.cos"]) +@keras_export(["keras.ops.cos", "keras.ops.numpy.cos"]) def cos(x): """Cosine, element-wise. @@ -1526,7 +1526,7 @@ class Cosh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.cosh", "keras_core.ops.numpy.cosh"]) +@keras_export(["keras.ops.cosh", "keras.ops.numpy.cosh"]) def cosh(x): """Hyperbolic cosine, element-wise. @@ -1559,10 +1559,10 @@ class CountNonzero(Operation): ) -@keras_core_export( +@keras_export( [ - "keras_core.ops.count_nonzero", - "keras_core.ops.numpy.count_nonzero", + "keras.ops.count_nonzero", + "keras.ops.numpy.count_nonzero", ] ) def count_nonzero(x, axis=None): @@ -1579,12 +1579,12 @@ def count_nonzero(x, axis=None): int or tensor of ints. 
Examples: - >>> x = keras_core.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]]) - >>> keras_core.ops.count_nonzero(x) + >>> x = keras.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]]) + >>> keras.ops.count_nonzero(x) 5 - >>> keras_core.ops.count_nonzero(x, axis=0) + >>> keras.ops.count_nonzero(x, axis=0) array([1, 1, 2, 1], dtype=int64) - >>> keras_core.ops.count_nonzero(x, axis=1) + >>> keras.ops.count_nonzero(x, axis=1) array([2, 3], dtype=int64) """ if any_symbolic_tensors((x,)): @@ -1639,7 +1639,7 @@ class Cross(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.cross", "keras_core.ops.numpy.cross"]) +@keras_export(["keras.ops.cross", "keras.ops.numpy.cross"]) def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): """Returns the cross product of two (arrays of) vectors. @@ -1706,7 +1706,7 @@ class Cumprod(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.cumprod", "keras_core.ops.numpy.cumprod"]) +@keras_export(["keras.ops.cumprod", "keras.ops.numpy.cumprod"]) def cumprod(x, axis=None): """Return the cumulative product of elements along a given axis. @@ -1741,7 +1741,7 @@ class Cumsum(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.cumsum", "keras_core.ops.numpy.cumsum"]) +@keras_export(["keras.ops.cumsum", "keras.ops.numpy.cumsum"]) def cumsum(x, axis=None): """Returns the cumulative sum of elements along a given axis. @@ -1795,7 +1795,7 @@ class Diag(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.diag", "keras_core.ops.numpy.diag"]) +@keras_export(["keras.ops.diag", "keras.ops.numpy.diag"]) def diag(x, k=0): """Extract a diagonal or construct a diagonal array. @@ -1810,7 +1810,7 @@ def diag(x, k=0): The extracted diagonal or constructed diagonal tensor. 
Examples: - >>> from keras_core import ops + >>> from keras import ops >>> x = ops.arange(9).reshape((3, 3)) >>> x array([[0, 1, 2], @@ -1876,7 +1876,7 @@ class Diagonal(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.diagonal", "keras_core.ops.numpy.diagonal"]) +@keras_export(["keras.ops.diagonal", "keras.ops.numpy.diagonal"]) def diagonal(x, offset=0, axis1=0, axis2=1): """Return specified diagonals. @@ -1904,7 +1904,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1): Tensor of diagonals. Examples: - >>> from keras_core import ops + >>> from keras import ops >>> x = ops.arange(4).reshape((2, 2)) >>> x array([[0, 1], @@ -1952,7 +1952,7 @@ class Digitize(Operation): return KerasTensor(x.shape, dtype="int32") -@keras_core_export(["keras_core.ops.digitize", "keras_core.ops.numpy.digitize"]) +@keras_export(["keras.ops.digitize", "keras.ops.numpy.digitize"]) def digitize(x, bins): """Returns the indices of the bins to which each value in `x` belongs. @@ -1967,7 +1967,7 @@ def digitize(x, bins): Example: >>> x = np.array([0.0, 1.0, 3.0, 1.6]) >>> bins = np.array([0.0, 3.0, 4.5, 7.0]) - >>> keras_core.ops.digitize(x, bins) + >>> keras.ops.digitize(x, bins) array([1, 1, 2, 1]) """ if any_symbolic_tensors((x, bins)): @@ -2011,7 +2011,7 @@ class Dot(Operation): ) -@keras_core_export(["keras_core.ops.dot", "keras_core.ops.numpy.dot"]) +@keras_export(["keras.ops.dot", "keras.ops.numpy.dot"]) def dot(x1, x2): """Dot product of two tensors. @@ -2214,7 +2214,7 @@ class Einsum(Operation): return KerasTensor(output_shape, dtype=dtype) -@keras_core_export(["keras_core.ops.einsum", "keras_core.ops.numpy.einsum"]) +@keras_export(["keras.ops.einsum", "keras.ops.numpy.einsum"]) def einsum(subscripts, *operands): """Evaluates the Einstein summation convention on the operands. @@ -2230,7 +2230,7 @@ def einsum(subscripts, *operands): The calculation based on the Einstein summation convention. 
Example: - >>> from keras_core import ops + >>> from keras import ops >>> a = ops.arange(25).reshape(5, 5) >>> b = ops.arange(5) >>> c = ops.arange(6).reshape(2, 3) @@ -2311,7 +2311,7 @@ class Empty(Operation): return KerasTensor(shape, dtype=dtype) -@keras_core_export(["keras_core.ops.empty", "keras_core.ops.numpy.empty"]) +@keras_export(["keras.ops.empty", "keras.ops.numpy.empty"]) def empty(shape, dtype="float32"): """Return a tensor of given shape and type filled with uninitialized data. @@ -2336,7 +2336,7 @@ class Equal(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.equal", "keras_core.ops.numpy.equal"]) +@keras_export(["keras.ops.equal", "keras.ops.numpy.equal"]) def equal(x1, x2): """Returns `(x1 == x2)` element-wise. @@ -2360,7 +2360,7 @@ class Exp(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.exp", "keras_core.ops.numpy.exp"]) +@keras_export(["keras.ops.exp", "keras.ops.numpy.exp"]) def exp(x): """Calculate the exponential of all elements in the input tensor. @@ -2398,10 +2398,10 @@ class ExpandDims(Operation): return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse) -@keras_core_export( +@keras_export( [ - "keras_core.ops.expand_dims", - "keras_core.ops.numpy.expand_dims", + "keras.ops.expand_dims", + "keras.ops.numpy.expand_dims", ] ) def expand_dims(x, axis): @@ -2430,7 +2430,7 @@ class Expm1(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.expm1", "keras_core.ops.numpy.expm1"]) +@keras_export(["keras.ops.expm1", "keras.ops.numpy.expm1"]) def expm1(x): """Calculate `exp(x) - 1` for all elements in the tensor. 
@@ -2457,7 +2457,7 @@ class Flip(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.flip", "keras_core.ops.numpy.flip"]) +@keras_export(["keras.ops.flip", "keras.ops.numpy.flip"]) def flip(x, axis=None): """Reverse the order of elements in the tensor along the given axis. @@ -2484,7 +2484,7 @@ class Floor(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.floor", "keras_core.ops.numpy.floor"]) +@keras_export(["keras.ops.floor", "keras.ops.numpy.floor"]) def floor(x): """Return the floor of the input, element-wise. @@ -2509,7 +2509,7 @@ class Full(Operation): return KerasTensor(shape, dtype=dtype) -@keras_core_export(["keras_core.ops.full", "keras_core.ops.numpy.full"]) +@keras_export(["keras.ops.full", "keras.ops.numpy.full"]) def full(shape, fill_value, dtype=None): """Return a new tensor of given shape and type, filled with `fill_value`. @@ -2532,8 +2532,8 @@ class FullLike(Operation): return KerasTensor(x.shape, dtype=dtype) -@keras_core_export( - ["keras_core.ops.full_like", "keras_core.ops.numpy.full_like"] +@keras_export( + ["keras.ops.full_like", "keras.ops.numpy.full_like"] ) def full_like(x, fill_value, dtype=None): """Return a full tensor with the same shape and type as the given tensor. @@ -2622,7 +2622,7 @@ class GetItem(Operation): return KerasTensor(tuple(new_shape), dtype=x.dtype) -@keras_core_export(["keras_core.ops.get_item", "keras_core.ops.numpy.get_item"]) +@keras_export(["keras.ops.get_item", "keras.ops.numpy.get_item"]) def get_item(x, key): """Return `x[key]`.""" if any_symbolic_tensors((x,)): @@ -2641,7 +2641,7 @@ class Greater(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.greater", "keras_core.ops.numpy.greater"]) +@keras_export(["keras.ops.greater", "keras.ops.numpy.greater"]) def greater(x1, x2): """Return the truth value of `x1 > x2` element-wise. 
@@ -2668,10 +2668,10 @@ class GreaterEqual(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.greater_equal", - "keras_core.ops.numpy.greater_equal", + "keras.ops.greater_equal", + "keras.ops.numpy.greater_equal", ] ) def greater_equal(x1, x2): @@ -2713,7 +2713,7 @@ class Hstack(Operation): return KerasTensor(output_shape) -@keras_core_export(["keras_core.ops.hstack", "keras_core.ops.numpy.hstack"]) +@keras_export(["keras.ops.hstack", "keras.ops.numpy.hstack"]) def hstack(xs): """Stack tensors in sequence horizontally (column wise). @@ -2739,7 +2739,7 @@ class Identity(Operation): return KerasTensor([n, n], dtype=dtype) -@keras_core_export(["keras_core.ops.identity", "keras_core.ops.numpy.identity"]) +@keras_export(["keras.ops.identity", "keras.ops.numpy.identity"]) def identity(n, dtype="float32"): """Return the identity tensor. @@ -2764,7 +2764,7 @@ class Imag(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.imag", "keras_core.ops.numpy.imag"]) +@keras_export(["keras.ops.imag", "keras.ops.numpy.imag"]) def imag(x): """Return the imaginary part of the complex argument. @@ -2790,7 +2790,7 @@ class Isclose(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.isclose", "keras_core.ops.numpy.isclose"]) +@keras_export(["keras.ops.isclose", "keras.ops.numpy.isclose"]) def isclose(x1, x2): """Return whether two tensors are element-wise almost equal. @@ -2814,7 +2814,7 @@ class Isfinite(Operation): return KerasTensor(x.shape, dtype="bool") -@keras_core_export(["keras_core.ops.isfinite", "keras_core.ops.numpy.isfinite"]) +@keras_export(["keras.ops.isfinite", "keras.ops.numpy.isfinite"]) def isfinite(x): """Return whether a tensor is finite, element-wise. 
@@ -2841,7 +2841,7 @@ class Isinf(Operation): return KerasTensor(x.shape, dtype="bool") -@keras_core_export(["keras_core.ops.isinf", "keras_core.ops.numpy.isinf"]) +@keras_export(["keras.ops.isinf", "keras.ops.numpy.isinf"]) def isinf(x): """Test element-wise for positive or negative infinity. @@ -2864,7 +2864,7 @@ class Isnan(Operation): return KerasTensor(x.shape, dtype="bool") -@keras_core_export(["keras_core.ops.isnan", "keras_core.ops.numpy.isnan"]) +@keras_export(["keras.ops.isnan", "keras.ops.numpy.isnan"]) def isnan(x): """Test element-wise for NaN and return result as a boolean tensor. @@ -2890,7 +2890,7 @@ class Less(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.less", "keras_core.ops.numpy.less"]) +@keras_export(["keras.ops.less", "keras.ops.numpy.less"]) def less(x1, x2): """Return the truth value of `x1 < x2` element-wise. @@ -2917,10 +2917,10 @@ class LessEqual(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.less_equal", - "keras_core.ops.numpy.less_equal", + "keras.ops.less_equal", + "keras.ops.numpy.less_equal", ] ) def less_equal(x1, x2): @@ -2985,7 +2985,7 @@ class Linspace(Operation): return KerasTensor(output_shape, dtype=dtype) -@keras_core_export(["keras_core.ops.linspace", "keras_core.ops.numpy.linspace"]) +@keras_export(["keras.ops.linspace", "keras.ops.numpy.linspace"]) def linspace( start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0 ): @@ -3040,7 +3040,7 @@ class Log(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.log", "keras_core.ops.numpy.log"]) +@keras_export(["keras.ops.log", "keras.ops.numpy.log"]) def log(x): """Natural logarithm, element-wise. 
@@ -3063,7 +3063,7 @@ class Log10(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.log10", "keras_core.ops.numpy.log10"]) +@keras_export(["keras.ops.log10", "keras.ops.numpy.log10"]) def log10(x): """Return the base 10 logarithm of the input tensor, element-wise. @@ -3086,7 +3086,7 @@ class Log1p(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.log1p", "keras_core.ops.numpy.log1p"]) +@keras_export(["keras.ops.log1p", "keras.ops.numpy.log1p"]) def log1p(x): """Returns the natural logarithm of one plus the `x`, element-wise. @@ -3111,7 +3111,7 @@ class Log2(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.log2", "keras_core.ops.numpy.log2"]) +@keras_export(["keras.ops.log2", "keras.ops.numpy.log2"]) def log2(x): """Base-2 logarithm of `x`, element-wise. @@ -3137,8 +3137,8 @@ class Logaddexp(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( - ["keras_core.ops.logaddexp", "keras_core.ops.numpy.logaddexp"] +@keras_export( + ["keras.ops.logaddexp", "keras.ops.numpy.logaddexp"] ) def logaddexp(x1, x2): """Logarithm of the sum of exponentiations of the inputs. 
@@ -3169,10 +3169,10 @@ class LogicalAnd(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.logical_and", - "keras_core.ops.numpy.logical_and", + "keras.ops.logical_and", + "keras.ops.numpy.logical_and", ] ) def logical_and(x1, x2): @@ -3200,10 +3200,10 @@ class LogicalNot(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.logical_not", - "keras_core.ops.numpy.logical_not", + "keras.ops.logical_not", + "keras.ops.numpy.logical_not", ] ) def logical_not(x): @@ -3233,10 +3233,10 @@ class LogicalOr(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.logical_or", - "keras_core.ops.numpy.logical_or", + "keras.ops.logical_or", + "keras.ops.numpy.logical_or", ] ) def logical_or(x1, x2): @@ -3299,7 +3299,7 @@ class Logspace(Operation): return KerasTensor(output_shape, dtype=dtype) -@keras_core_export(["keras_core.ops.logspace", "keras_core.ops.numpy.logspace"]) +@keras_export(["keras.ops.logspace", "keras.ops.numpy.logspace"]) def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): """Returns numbers spaced evenly on a log scale. @@ -3374,7 +3374,7 @@ class Matmul(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.matmul", "keras_core.ops.numpy.matmul"]) +@keras_export(["keras.ops.matmul", "keras.ops.numpy.matmul"]) def matmul(x1, x2): """Matrix product of two tensors. @@ -3424,7 +3424,7 @@ class Max(Operation): ) -@keras_core_export(["keras_core.ops.max", "keras_core.ops.numpy.max"]) +@keras_export(["keras.ops.max", "keras.ops.numpy.max"]) def max(x, axis=None, keepdims=False, initial=None): """Return the maximum of a tensor or maximum along an axis. 
@@ -3460,7 +3460,7 @@ class Maximum(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.maximum", "keras_core.ops.numpy.maximum"]) +@keras_export(["keras.ops.maximum", "keras.ops.numpy.maximum"]) def maximum(x1, x2): """Element-wise maximum of `x1` and `x2`. @@ -3508,7 +3508,7 @@ class Meshgrid(Operation): return [KerasTensor(output_shape) for _ in range(len(x))] -@keras_core_export(["keras_core.ops.meshgrid", "keras_core.ops.numpy.meshgrid"]) +@keras_export(["keras.ops.meshgrid", "keras.ops.numpy.meshgrid"]) def meshgrid(*x, indexing="xy"): """Creates grids of coordinates from coordinate vectors. @@ -3526,7 +3526,7 @@ def meshgrid(*x, indexing="xy"): Sequence of N tensors. Example: - >>> from keras_core import ops + >>> from keras import ops >>> x = ops.array([1, 2, 3]) >>> y = ops.array([4, 5, 6]) @@ -3566,7 +3566,7 @@ class Min(Operation): ) -@keras_core_export(["keras_core.ops.min", "keras_core.ops.numpy.min"]) +@keras_export(["keras.ops.min", "keras.ops.numpy.min"]) def min(x, axis=None, keepdims=False, initial=None): """Return the minimum of a tensor or minimum along an axis. @@ -3602,7 +3602,7 @@ class Minimum(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.minimum", "keras_core.ops.numpy.minimum"]) +@keras_export(["keras.ops.minimum", "keras.ops.numpy.minimum"]) def minimum(x1, x2): """Element-wise minimum of `x1` and `x2`. @@ -3629,7 +3629,7 @@ class Mod(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.mod", "keras_core.ops.numpy.mod"]) +@keras_export(["keras.ops.mod", "keras.ops.numpy.mod"]) def mod(x1, x2): """Returns the element-wise remainder of division. 
@@ -3689,7 +3689,7 @@ class Moveaxis(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.moveaxis", "keras_core.ops.numpy.moveaxis"]) +@keras_export(["keras.ops.moveaxis", "keras.ops.numpy.moveaxis"]) def moveaxis(x, source, destination): """Move axes of a tensor to new positions. @@ -3714,10 +3714,10 @@ class NanToNum(Operation): return backend.numpy.nan_to_num(x) -@keras_core_export( +@keras_export( [ - "keras_core.ops.nan_to_num", - "keras_core.ops.numpy.nan_to_num", + "keras.ops.nan_to_num", + "keras.ops.numpy.nan_to_num", ] ) def nan_to_num(x): @@ -3742,7 +3742,7 @@ class Ndim(Operation): return KerasTensor([len(x.shape)]) -@keras_core_export(["keras_core.ops.ndim", "keras_core.ops.numpy.ndim"]) +@keras_export(["keras.ops.ndim", "keras.ops.numpy.ndim"]) def ndim(x): """Return the number of dimensions of a tensor. @@ -3762,7 +3762,7 @@ class Nonzero(Operation): return backend.numpy.nonzero(x) -@keras_core_export(["keras_core.ops.nonzero", "keras_core.ops.numpy.nonzero"]) +@keras_export(["keras.ops.nonzero", "keras.ops.numpy.nonzero"]) def nonzero(x): """Return the indices of the elements that are non-zero. @@ -3786,8 +3786,8 @@ class NotEqual(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( - ["keras_core.ops.not_equal", "keras_core.ops.numpy.not_equal"] +@keras_export( + ["keras.ops.not_equal", "keras.ops.numpy.not_equal"] ) def not_equal(x1, x2): """Return `(x1 != x2)` element-wise. @@ -3814,8 +3814,8 @@ class OnesLike(Operation): return KerasTensor(x.shape, dtype=dtype) -@keras_core_export( - ["keras_core.ops.ones_like", "keras_core.ops.numpy.ones_like"] +@keras_export( + ["keras.ops.ones_like", "keras.ops.numpy.ones_like"] ) def ones_like(x, dtype=None): """Return a tensor of ones with the same shape and type of `x`. 
@@ -3842,10 +3842,10 @@ class ZerosLike(Operation): return KerasTensor(x.shape, dtype=dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.zeros_like", - "keras_core.ops.numpy.zeros_like", + "keras.ops.zeros_like", + "keras.ops.numpy.zeros_like", ] ) def zeros_like(x, dtype=None): @@ -3882,7 +3882,7 @@ class Outer(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.outer", "keras_core.ops.numpy.outer"]) +@keras_export(["keras.ops.outer", "keras.ops.numpy.outer"]) def outer(x1, x2): """Compute the outer product of two vectors. @@ -3951,7 +3951,7 @@ class Pad(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.pad", "keras_core.ops.numpy.pad"]) +@keras_export(["keras.ops.pad", "keras.ops.numpy.pad"]) def pad(x, pad_width, mode="constant"): """Pad a tensor. @@ -4011,7 +4011,7 @@ class Prod(Operation): ) -@keras_core_export(["keras_core.ops.prod", "keras_core.ops.numpy.prod"]) +@keras_export(["keras.ops.prod", "keras.ops.numpy.prod"]) def prod(x, axis=None, keepdims=False, dtype=None): """Return the product of tensor elements over a given axis. @@ -4046,7 +4046,7 @@ class Ravel(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.ravel", "keras_core.ops.numpy.ravel"]) +@keras_export(["keras.ops.ravel", "keras.ops.numpy.ravel"]) def ravel(x): """Return a contiguous flattened tensor. @@ -4071,7 +4071,7 @@ class Real(Operation): return KerasTensor(x.shape) -@keras_core_export(["keras_core.ops.real", "keras_core.ops.numpy.real"]) +@keras_export(["keras.ops.real", "keras.ops.numpy.real"]) def real(x): """Return the real part of the complex argument. 
@@ -4094,10 +4094,10 @@ class Reciprocal(Operation): return KerasTensor(x.shape) -@keras_core_export( +@keras_export( [ - "keras_core.ops.reciprocal", - "keras_core.ops.numpy.reciprocal", + "keras.ops.reciprocal", + "keras.ops.numpy.reciprocal", ] ) def reciprocal(x): @@ -4150,7 +4150,7 @@ class Repeat(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.repeat", "keras_core.ops.numpy.repeat"]) +@keras_export(["keras.ops.repeat", "keras.ops.numpy.repeat"]) def repeat(x, repeats, axis=None): """Repeat each element of a tensor after themselves. @@ -4183,7 +4183,7 @@ class Reshape(Operation): return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse) -@keras_core_export(["keras_core.ops.reshape", "keras_core.ops.numpy.reshape"]) +@keras_export(["keras.ops.reshape", "keras.ops.numpy.reshape"]) def reshape(x, new_shape): """Gives a new shape to a tensor without changing its data. @@ -4214,7 +4214,7 @@ class Roll(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.roll", "keras_core.ops.numpy.roll"]) +@keras_export(["keras.ops.roll", "keras.ops.numpy.roll"]) def roll(x, shift, axis=None): """Roll tensor elements along a given axis. @@ -4247,7 +4247,7 @@ class Round(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.round", "keras_core.ops.numpy.round"]) +@keras_export(["keras.ops.round", "keras.ops.numpy.round"]) def round(x, decimals=0): """Evenly round to the given number of decimals. @@ -4271,7 +4271,7 @@ class Sign(Operation): return KerasTensor(x.shape, dtype="int32") -@keras_core_export(["keras_core.ops.sign", "keras_core.ops.numpy.sign"]) +@keras_export(["keras.ops.sign", "keras.ops.numpy.sign"]) def sign(x): """Returns a tensor with the signs of the elements of `x`. 
@@ -4294,7 +4294,7 @@ class Sin(Operation): return KerasTensor(x.shape) -@keras_core_export(["keras_core.ops.sin", "keras_core.ops.numpy.sin"]) +@keras_export(["keras.ops.sin", "keras.ops.numpy.sin"]) def sin(x): """Trigonomeric sine, element-wise. @@ -4317,7 +4317,7 @@ class Sinh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.sinh", "keras_core.ops.numpy.sinh"]) +@keras_export(["keras.ops.sinh", "keras.ops.numpy.sinh"]) def sinh(x): """Hyperbolic sine, element-wise. @@ -4340,7 +4340,7 @@ class Size(Operation): return KerasTensor([], dtype="int32") -@keras_core_export(["keras_core.ops.size", "keras_core.ops.numpy.size"]) +@keras_export(["keras.ops.size", "keras.ops.numpy.size"]) def size(x): """Return the number of elements in a tensor. @@ -4367,7 +4367,7 @@ class Sort(Operation): return KerasTensor(x.shape, x.dtype) -@keras_core_export(["keras_core.ops.sort", "keras_core.ops.numpy.sort"]) +@keras_export(["keras.ops.sort", "keras.ops.numpy.sort"]) def sort(x, axis=-1): """Sorts the elements of `x` along a given axis in ascending order. @@ -4429,7 +4429,7 @@ class Split(Operation): return outputs -@keras_core_export(["keras_core.ops.split", "keras_core.ops.numpy.split"]) +@keras_export(["keras.ops.split", "keras.ops.numpy.split"]) def split(x, indices_or_sections, axis=0): """Split a tensor into chunks. @@ -4485,7 +4485,7 @@ class Stack(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.stack", "keras_core.ops.numpy.stack"]) +@keras_export(["keras.ops.stack", "keras.ops.numpy.stack"]) def stack(x, axis=0): """Join a sequence of tensors along a new axis. @@ -4522,7 +4522,7 @@ class Std(Operation): ) -@keras_core_export(["keras_core.ops.std", "keras_core.ops.numpy.std"]) +@keras_export(["keras.ops.std", "keras.ops.numpy.std"]) def std(x, axis=None, keepdims=False): """Compute the standard deviation along the specified axis. 
@@ -4560,7 +4560,7 @@ class Swapaxes(Operation): return KerasTensor(x_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.swapaxes", "keras_core.ops.numpy.swapaxes"]) +@keras_export(["keras.ops.swapaxes", "keras.ops.numpy.swapaxes"]) def swapaxes(x, axis1, axis2): """Interchange two axes of a tensor. @@ -4600,7 +4600,7 @@ class Take(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.take", "keras_core.ops.numpy.take"]) +@keras_export(["keras.ops.take", "keras.ops.numpy.take"]) def take(x, indices, axis=None): """Take elements from a tensor along an axis. @@ -4652,10 +4652,10 @@ class TakeAlongAxis(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.take_along_axis", - "keras_core.ops.numpy.take_along_axis", + "keras.ops.take_along_axis", + "keras.ops.numpy.take_along_axis", ] ) def take_along_axis(x, indices, axis=None): @@ -4683,7 +4683,7 @@ class Tan(Operation): return KerasTensor(x.shape) -@keras_core_export(["keras_core.ops.tan", "keras_core.ops.numpy.tan"]) +@keras_export(["keras.ops.tan", "keras.ops.numpy.tan"]) def tan(x): """Compute tangent, element-wise. @@ -4706,7 +4706,7 @@ class Tanh(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.tanh", "keras_core.ops.numpy.tanh"]) +@keras_export(["keras.ops.tanh", "keras.ops.numpy.tanh"]) def tanh(x): """Hyperbolic tangent, element-wise. @@ -4762,8 +4762,8 @@ class Tensordot(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( - ["keras_core.ops.tensordot", "keras_core.ops.numpy.tensordot"] +@keras_export( + ["keras.ops.tensordot", "keras.ops.numpy.tensordot"] ) def tensordot(x1, x2, axes=2): """Compute the tensor dot product along specified axes. 
@@ -4811,7 +4811,7 @@ class Tile(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.tile", "keras_core.ops.numpy.tile"]) +@keras_export(["keras.ops.tile", "keras.ops.numpy.tile"]) def tile(x, repeats): """Repeat `x` the number of times given by `repeats`. @@ -4857,7 +4857,7 @@ class Trace(Operation): return KerasTensor(output_shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.trace", "keras_core.ops.numpy.trace"]) +@keras_export(["keras.ops.trace", "keras.ops.numpy.trace"]) def trace(x, offset=0, axis1=0, axis2=1): """Return the sum along diagonals of the tensor. @@ -4900,7 +4900,7 @@ class Tri(Operation): return KerasTensor((N, M), dtype=dtype) -@keras_core_export(["keras_core.ops.tri", "keras_core.ops.numpy.tri"]) +@keras_export(["keras.ops.tri", "keras.ops.numpy.tri"]) def tri(N, M=None, k=0, dtype="float32"): """Return a tensor with ones at and below a diagonal and zeros elsewhere. @@ -4931,7 +4931,7 @@ class Tril(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.tril", "keras_core.ops.numpy.tril"]) +@keras_export(["keras.ops.tril", "keras.ops.numpy.tril"]) def tril(x, k=0): """Return lower triangle of a tensor. @@ -4963,7 +4963,7 @@ class Triu(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.triu", "keras_core.ops.numpy.triu"]) +@keras_export(["keras.ops.triu", "keras.ops.numpy.triu"]) def triu(x, k=0): """Return upper triangle of a tensor. @@ -4991,7 +4991,7 @@ class Vdot(Operation): return KerasTensor([], dtype=x1.dtype) -@keras_core_export(["keras_core.ops.vdot", "keras_core.ops.numpy.vdot"]) +@keras_export(["keras.ops.vdot", "keras.ops.numpy.vdot"]) def vdot(x1, x2): """Return the dot product of two vectors. 
@@ -5037,7 +5037,7 @@ class Vstack(Operation): return KerasTensor(output_shape) -@keras_core_export(["keras_core.ops.vstack", "keras_core.ops.numpy.vstack"]) +@keras_export(["keras.ops.vstack", "keras.ops.numpy.vstack"]) def vstack(xs): """Stack tensors in sequence vertically (row wise). @@ -5066,7 +5066,7 @@ class Where(Operation): return KerasTensor(output_shape, dtype=output_dtype) -@keras_core_export(["keras_core.ops.where", "keras_core.ops.numpy.where"]) +@keras_export(["keras.ops.where", "keras.ops.numpy.where"]) def where(condition, x1=None, x2=None): """Return elements chosen from `x1` or `x2` depending on `condition`. @@ -5103,7 +5103,7 @@ class Subtract(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.subtract", "keras_core.ops.numpy.subtract"]) +@keras_export(["keras.ops.subtract", "keras.ops.numpy.subtract"]) def subtract(x1, x2): """Subtract arguments element-wise. @@ -5133,7 +5133,7 @@ class Multiply(Operation): return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse) -@keras_core_export(["keras_core.ops.multiply", "keras_core.ops.numpy.multiply"]) +@keras_export(["keras.ops.multiply", "keras.ops.numpy.multiply"]) def multiply(x1, x2): """Multiply arguments element-wise. @@ -5160,11 +5160,11 @@ class Divide(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.divide", "keras_core.ops.numpy.divide"]) +@keras_export(["keras.ops.divide", "keras.ops.numpy.divide"]) def divide(x1, x2): """Divide arguments element-wise. - `keras_core.ops.true_divide` is an alias for this function. + `keras.ops.true_divide` is an alias for this function. Args: x1: First input tensor. 
@@ -5189,14 +5189,14 @@ class TrueDivide(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( +@keras_export( [ - "keras_core.ops.true_divide", - "keras_core.ops.numpy.true_divide", + "keras.ops.true_divide", + "keras.ops.numpy.true_divide", ] ) def true_divide(x1, x2): - """Alias for `keras_core.ops.divide`.""" + """Alias for `keras.ops.divide`.""" if any_symbolic_tensors((x1, x2)): return TrueDivide().symbolic_call(x1, x2) return backend.numpy.true_divide(x1, x2) @@ -5213,7 +5213,7 @@ class Power(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export(["keras_core.ops.power", "keras_core.ops.numpy.power"]) +@keras_export(["keras.ops.power", "keras.ops.numpy.power"]) def power(x1, x2): """First tensor elements raised to powers from second tensor, element-wise. @@ -5237,7 +5237,7 @@ class Negative(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.negative", "keras_core.ops.numpy.negative"]) +@keras_export(["keras.ops.negative", "keras.ops.numpy.negative"]) def negative(x): """Numerical negative, element-wise. @@ -5260,7 +5260,7 @@ class Square(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.square", "keras_core.ops.numpy.square"]) +@keras_export(["keras.ops.square", "keras.ops.numpy.square"]) def square(x): """Return the element-wise square of the input. @@ -5284,7 +5284,7 @@ class Sqrt(Operation): return KerasTensor(x.shape, dtype=x.dtype) -@keras_core_export(["keras_core.ops.sqrt", "keras_core.ops.numpy.sqrt"]) +@keras_export(["keras.ops.sqrt", "keras.ops.numpy.sqrt"]) def sqrt(x): """Return the non-negative square root of a tensor, element-wise. 
@@ -5323,7 +5323,7 @@ class Squeeze(Operation): return KerasTensor(input_shape, dtype=x.dtype, sparse=x.sparse) -@keras_core_export(["keras_core.ops.squeeze", "keras_core.ops.numpy.squeeze"]) +@keras_export(["keras.ops.squeeze", "keras.ops.numpy.squeeze"]) def squeeze(x, axis=None): """Remove axes of length one from `x`. @@ -5364,8 +5364,8 @@ class Transpose(Operation): return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse) -@keras_core_export( - ["keras_core.ops.transpose", "keras_core.ops.numpy.transpose"] +@keras_export( + ["keras.ops.transpose", "keras.ops.numpy.transpose"] ) def transpose(x, axes=None): """Returns a tensor with `axes` transposed. @@ -5401,7 +5401,7 @@ class Mean(Operation): ) -@keras_core_export(["keras_core.ops.mean", "keras_core.ops.numpy.mean"]) +@keras_export(["keras.ops.mean", "keras.ops.numpy.mean"]) def mean(x, axis=None, keepdims=False): """Compute the arithmetic mean along the specified axes. @@ -5438,7 +5438,7 @@ class Var(Operation): ) -@keras_core_export(["keras_core.ops.var", "keras_core.ops.numpy.var"]) +@keras_export(["keras.ops.var", "keras.ops.numpy.var"]) def var(x, axis=None, keepdims=False): """Compute the variance along the specified axes. @@ -5475,7 +5475,7 @@ class Sum(Operation): ) -@keras_core_export(["keras_core.ops.sum", "keras_core.ops.numpy.sum"]) +@keras_export(["keras.ops.sum", "keras.ops.numpy.sum"]) def sum(x, axis=None, keepdims=False): """Sum of a tensor over the given axes. @@ -5502,7 +5502,7 @@ class Zeros(Operation): return KerasTensor(shape, dtype=dtype) -@keras_core_export(["keras_core.ops.zeros", "keras_core.ops.numpy.zeros"]) +@keras_export(["keras.ops.zeros", "keras.ops.numpy.zeros"]) def zeros(shape, dtype="float32"): """Return a new tensor of given shape and type, filled with zeros. 
@@ -5524,7 +5524,7 @@ class Ones(Operation): return KerasTensor(shape, dtype=dtype) -@keras_core_export(["keras_core.ops.ones", "keras_core.ops.numpy.ones"]) +@keras_export(["keras.ops.ones", "keras.ops.numpy.ones"]) def ones(shape, dtype="float32"): """Return a new tensor of given shape and type, filled with ones. @@ -5548,7 +5548,7 @@ class Eye(Operation): return KerasTensor((N, M), dtype=dtype) -@keras_core_export(["keras_core.ops.eye", "keras_core.ops.numpy.eye"]) +@keras_export(["keras.ops.eye", "keras.ops.numpy.eye"]) def eye(N, M=None, k=0, dtype="float32"): """Return a 2-D tensor with ones on the diagonal and zeros elsewhere. @@ -5577,8 +5577,8 @@ class FloorDivide(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( - ["keras_core.ops.floor_divide", "keras_core.ops.numpy.floor_divide"] +@keras_export( + ["keras.ops.floor_divide", "keras.ops.numpy.floor_divide"] ) def floor_divide(x1, x2): """Returns the largest integer smaller or equal to the division of inputs. @@ -5606,8 +5606,8 @@ class LogicalXor(Operation): return KerasTensor(output_shape, dtype=x1.dtype) -@keras_core_export( - ["keras_core.ops.logical_xor", "keras_core.ops.numpy.logical_xor"] +@keras_export( + ["keras.ops.logical_xor", "keras.ops.numpy.logical_xor"] ) def logical_xor(x1, x2): """Compute the truth value of `x1 XOR x2`, element-wise. 
diff --git a/keras_core/ops/numpy_test.py b/keras/ops/numpy_test.py similarity index 99% rename from keras_core/ops/numpy_test.py rename to keras/ops/numpy_test.py index bc39a092e..d918d11f2 100644 --- a/keras_core/ops/numpy_test.py +++ b/keras/ops/numpy_test.py @@ -3,11 +3,11 @@ import pytest from absl.testing import parameterized from tensorflow.python.ops.numpy_ops import np_config -from keras_core import backend -from keras_core import testing -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.ops import numpy as knp +from keras import backend +from keras import testing +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.ops import numpy as knp # TODO: remove reliance on this (or alternatively, turn it on by default). np_config.enable_numpy_behavior() diff --git a/keras_core/ops/operation.py b/keras/ops/operation.py similarity index 95% rename from keras_core/ops/operation.py rename to keras/ops/operation.py index a2ecc5b2b..e4b325508 100644 --- a/keras_core/ops/operation.py +++ b/keras/ops/operation.py @@ -3,16 +3,16 @@ import textwrap import tree -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend.common.keras_tensor import any_symbolic_tensors -from keras_core.ops.node import Node -from keras_core.utils import python_utils -from keras_core.utils import traceback_utils -from keras_core.utils.naming import auto_name +from keras import backend +from keras.api_export import keras_export +from keras.backend.common.keras_tensor import any_symbolic_tensors +from keras.ops.node import Node +from keras.utils import python_utils +from keras.utils import traceback_utils +from keras.utils.naming import auto_name -@keras_core_export("keras_core.Operation") +@keras_export("keras.Operation") class Operation: def __init__(self, name=None): if name is None: @@ 
-105,7 +105,7 @@ class Operation: try: instance._lock = False if auto_config: - from keras_core.saving import serialization_lib + from keras.saving import serialization_lib instance._auto_config = serialization_lib.SerializableDict( **kwargs diff --git a/keras_core/ops/operation_test.py b/keras/ops/operation_test.py similarity index 96% rename from keras_core/ops/operation_test.py rename to keras/ops/operation_test.py index 694af7cdc..e01ef075e 100644 --- a/keras_core/ops/operation_test.py +++ b/keras/ops/operation_test.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core import backend -from keras_core import testing -from keras_core.backend.common import keras_tensor -from keras_core.ops import numpy as knp -from keras_core.ops import operation +from keras import backend +from keras import testing +from keras.backend.common import keras_tensor +from keras.ops import numpy as knp +from keras.ops import operation class OpWithMultipleInputs(operation.Operation): diff --git a/keras_core/ops/operation_utils.py b/keras/ops/operation_utils.py similarity index 98% rename from keras_core/ops/operation_utils.py rename to keras/ops/operation_utils.py index bfac73ec4..b26de9857 100644 --- a/keras_core/ops/operation_utils.py +++ b/keras/ops/operation_utils.py @@ -3,7 +3,7 @@ import math import numpy as np import tree -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export def compute_pooling_output_shape( @@ -236,7 +236,7 @@ def reduce_shape(shape, axis=None, keepdims=False): return tuple(shape) -@keras_core_export("keras_core.utils.get_source_inputs") +@keras_export("keras.utils.get_source_inputs") def get_source_inputs(tensor): """Returns the list of input tensors necessary to compute `tensor`. 
diff --git a/keras_core/ops/operation_utils_test.py b/keras/ops/operation_utils_test.py similarity index 97% rename from keras_core/ops/operation_utils_test.py rename to keras/ops/operation_utils_test.py index e0ec7c6fd..8d8b066bf 100644 --- a/keras_core/ops/operation_utils_test.py +++ b/keras/ops/operation_utils_test.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.layers.core import input_layer -from keras_core.ops import operation_utils +from keras import backend +from keras import ops +from keras import testing +from keras.layers.core import input_layer +from keras.ops import operation_utils class OperationUtilsTest(testing.TestCase): diff --git a/keras_core/ops/symbolic_arguments.py b/keras/ops/symbolic_arguments.py similarity index 97% rename from keras_core/ops/symbolic_arguments.py rename to keras/ops/symbolic_arguments.py index c7f83b0a1..b187ea186 100644 --- a/keras_core/ops/symbolic_arguments.py +++ b/keras/ops/symbolic_arguments.py @@ -1,6 +1,6 @@ import tree -from keras_core.backend import KerasTensor +from keras.backend import KerasTensor class SymbolicArguments: diff --git a/keras_core/ops/symbolic_arguments_test.py b/keras/ops/symbolic_arguments_test.py similarity index 96% rename from keras_core/ops/symbolic_arguments_test.py rename to keras/ops/symbolic_arguments_test.py index e3743fd41..d7a5eddb8 100644 --- a/keras_core/ops/symbolic_arguments_test.py +++ b/keras/ops/symbolic_arguments_test.py @@ -1,8 +1,8 @@ import tree -from keras_core import testing -from keras_core.backend import KerasTensor -from keras_core.ops.symbolic_arguments import SymbolicArguments +from keras import testing +from keras.backend import KerasTensor +from keras.ops.symbolic_arguments import SymbolicArguments class SymbolicArgumentsTest(testing.TestCase): diff --git a/keras_core/optimizers/__init__.py b/keras/optimizers/__init__.py similarity index 69% rename from 
keras_core/optimizers/__init__.py rename to keras/optimizers/__init__.py index 86d38da6d..b79fbbf12 100644 --- a/keras_core/optimizers/__init__.py +++ b/keras/optimizers/__init__.py @@ -1,18 +1,18 @@ -from keras_core.api_export import keras_core_export -from keras_core.optimizers.adadelta import Adadelta -from keras_core.optimizers.adafactor import Adafactor -from keras_core.optimizers.adagrad import Adagrad -from keras_core.optimizers.adam import Adam -from keras_core.optimizers.adamax import Adamax -from keras_core.optimizers.adamw import AdamW -from keras_core.optimizers.ftrl import Ftrl -from keras_core.optimizers.lion import Lion -from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras_core.optimizers.nadam import Nadam -from keras_core.optimizers.optimizer import Optimizer -from keras_core.optimizers.rmsprop import RMSprop -from keras_core.optimizers.sgd import SGD -from keras_core.saving import serialization_lib +from keras.api_export import keras_export +from keras.optimizers.adadelta import Adadelta +from keras.optimizers.adafactor import Adafactor +from keras.optimizers.adagrad import Adagrad +from keras.optimizers.adam import Adam +from keras.optimizers.adamax import Adamax +from keras.optimizers.adamw import AdamW +from keras.optimizers.ftrl import Ftrl +from keras.optimizers.lion import Lion +from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.optimizers.nadam import Nadam +from keras.optimizers.optimizer import Optimizer +from keras.optimizers.rmsprop import RMSprop +from keras.optimizers.sgd import SGD +from keras.saving import serialization_lib ALL_OBJECTS = { Optimizer, @@ -32,7 +32,7 @@ ALL_OBJECTS = { ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS} -@keras_core_export("keras_core.optimizers.serialize") +@keras_export("keras.optimizers.serialize") def serialize(optimizer): """Returns the optimizer configuration as a Python dict. 
@@ -45,7 +45,7 @@ def serialize(optimizer): return serialization_lib.serialize_keras_object(optimizer) -@keras_core_export("keras_core.optimizers.deserialize") +@keras_export("keras.optimizers.deserialize") def deserialize(config, custom_objects=None): """Returns a Keras optimizer object via its configuration. @@ -69,7 +69,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.optimizers.get") +@keras_export("keras.optimizers.get") def get(identifier): """Retrieves a Keras Optimizer instance. diff --git a/keras_core/optimizers/adadelta.py b/keras/optimizers/adadelta.py similarity index 93% rename from keras_core/optimizers/adadelta.py rename to keras/optimizers/adadelta.py index 117ee6658..0cacd48b8 100644 --- a/keras_core/optimizers/adadelta.py +++ b/keras/optimizers/adadelta.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Adadelta"]) +@keras_export(["keras.optimizers.Adadelta"]) class Adadelta(optimizer.Optimizer): """Optimizer that implements the Adadelta algorithm. @@ -22,7 +22,7 @@ class Adadelta(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. 
Note that `Adadelta` tends to benefit from higher initial learning rate values compared diff --git a/keras_core/optimizers/adadelta_test.py b/keras/optimizers/adadelta_test.py similarity index 94% rename from keras_core/optimizers/adadelta_test.py rename to keras/optimizers/adadelta_test.py index f80236c1f..c7c9dd7d3 100644 --- a/keras_core/optimizers/adadelta_test.py +++ b/keras/optimizers/adadelta_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.adadelta import Adadelta +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.adadelta import Adadelta class AdadeltaTest(testing.TestCase): diff --git a/keras_core/optimizers/adafactor.py b/keras/optimizers/adafactor.py similarity index 96% rename from keras_core/optimizers/adafactor.py rename to keras/optimizers/adafactor.py index ad301ab4c..9c5f00556 100644 --- a/keras_core/optimizers/adafactor.py +++ b/keras/optimizers/adafactor.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Adafactor"]) +@keras_export(["keras.optimizers.Adafactor"]) class Adafactor(optimizer.Optimizer): """Optimizer that implements the Adafactor algorithm. @@ -18,7 +18,7 @@ class Adafactor(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. beta_2_decay: float, defaults to -0.8. The decay rate of `beta_2`. 
diff --git a/keras_core/optimizers/adafactor_test.py b/keras/optimizers/adafactor_test.py similarity index 96% rename from keras_core/optimizers/adafactor_test.py rename to keras/optimizers/adafactor_test.py index 05c3d8af4..cfcc90b14 100644 --- a/keras_core/optimizers/adafactor_test.py +++ b/keras/optimizers/adafactor_test.py @@ -3,9 +3,9 @@ import numpy as np -from keras_core import backend -from keras_core import testing -from keras_core.optimizers.adafactor import Adafactor +from keras import backend +from keras import testing +from keras.optimizers.adafactor import Adafactor class AdafactorTest(testing.TestCase): diff --git a/keras_core/optimizers/adagrad.py b/keras/optimizers/adagrad.py similarity index 92% rename from keras_core/optimizers/adagrad.py rename to keras/optimizers/adagrad.py index a4feb52d1..81bf910f9 100644 --- a/keras_core/optimizers/adagrad.py +++ b/keras/optimizers/adagrad.py @@ -1,10 +1,10 @@ -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Adagrad"]) +@keras_export(["keras.optimizers.Adagrad"]) class Adagrad(optimizer.Optimizer): """Optimizer that implements the Adagrad algorithm. @@ -15,7 +15,7 @@ class Adagrad(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. 
Note that `Adagrad` tends to benefit from higher initial learning rate values compared diff --git a/keras_core/optimizers/adagrad_test.py b/keras/optimizers/adagrad_test.py similarity index 95% rename from keras_core/optimizers/adagrad_test.py rename to keras/optimizers/adagrad_test.py index a53109029..a724540fa 100644 --- a/keras_core/optimizers/adagrad_test.py +++ b/keras/optimizers/adagrad_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.adagrad import Adagrad +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.adagrad import Adagrad class AdagradTest(testing.TestCase): diff --git a/keras_core/optimizers/adam.py b/keras/optimizers/adam.py similarity index 95% rename from keras_core/optimizers/adam.py rename to keras/optimizers/adam.py index 0b696a620..bcf25ae6a 100644 --- a/keras_core/optimizers/adam.py +++ b/keras/optimizers/adam.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Adam"]) +@keras_export(["keras.optimizers.Adam"]) class Adam(optimizer.Optimizer): """Optimizer that implements the Adam algorithm. @@ -19,7 +19,7 @@ class Adam(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. 
beta_1: A float value or a constant float tensor, or a callable diff --git a/keras_core/optimizers/adam_test.py b/keras/optimizers/adam_test.py similarity index 90% rename from keras_core/optimizers/adam_test.py rename to keras/optimizers/adam_test.py index f8b5c1066..4f33f4afd 100644 --- a/keras_core/optimizers/adam_test.py +++ b/keras/optimizers/adam_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -import keras_core -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.adam import Adam +import keras +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.adam import Adam class AdamTest(testing.TestCase): @@ -79,8 +79,8 @@ class AdamTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_ema(self): # TODO: test correctness - model = keras_core.Sequential([keras_core.layers.Dense(10)]) + model = keras.Sequential([keras.layers.Dense(10)]) model.compile(optimizer=Adam(use_ema=True), loss="mse") - x = keras_core.ops.zeros((1, 5)) - y = keras_core.ops.zeros((1, 10)) + x = keras.ops.zeros((1, 5)) + y = keras.ops.zeros((1, 10)) model.fit(x, y) diff --git a/keras_core/optimizers/adamax.py b/keras/optimizers/adamax.py similarity index 94% rename from keras_core/optimizers/adamax.py rename to keras/optimizers/adamax.py index 1526801fd..9e27b4f22 100644 --- a/keras_core/optimizers/adamax.py +++ b/keras/optimizers/adamax.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Adamax"]) +@keras_export(["keras.optimizers.Adamax"]) class Adamax(optimizer.Optimizer): """Optimizer that implements the Adamax algorithm. 
@@ -35,7 +35,7 @@ class Adamax(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. beta_1: A float value or a constant float tensor. The exponential decay diff --git a/keras_core/optimizers/adamax_test.py b/keras/optimizers/adamax_test.py similarity index 95% rename from keras_core/optimizers/adamax_test.py rename to keras/optimizers/adamax_test.py index 512f49152..f040d508b 100644 --- a/keras_core/optimizers/adamax_test.py +++ b/keras/optimizers/adamax_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.adamax import Adamax +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.adamax import Adamax class AdamaxTest(testing.TestCase): diff --git a/keras_core/optimizers/adamw.py b/keras/optimizers/adamw.py similarity index 92% rename from keras_core/optimizers/adamw.py rename to keras/optimizers/adamw.py index cc4247d2b..cda4e5bd4 100644 --- a/keras_core/optimizers/adamw.py +++ b/keras/optimizers/adamw.py @@ -1,9 +1,9 @@ -from keras_core.api_export import keras_core_export -from keras_core.optimizers import adam -from keras_core.optimizers import optimizer +from keras.api_export import keras_export +from keras.optimizers import adam +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.AdamW"]) +@keras_export(["keras.optimizers.AdamW"]) class AdamW(adam.Adam): """Optimizer that implements the AdamW algorithm. 
@@ -22,7 +22,7 @@ class AdamW(adam.Adam): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. beta_1: A float value or a constant float tensor, or a callable diff --git a/keras_core/optimizers/adamw_test.py b/keras/optimizers/adamw_test.py similarity index 95% rename from keras_core/optimizers/adamw_test.py rename to keras/optimizers/adamw_test.py index cf65099ff..37ee3865a 100644 --- a/keras_core/optimizers/adamw_test.py +++ b/keras/optimizers/adamw_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.adamw import AdamW +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.adamw import AdamW class AdamWTest(testing.TestCase): diff --git a/keras_core/optimizers/base_optimizer.py b/keras/optimizers/base_optimizer.py similarity index 98% rename from keras_core/optimizers/base_optimizer.py rename to keras/optimizers/base_optimizer.py index db042570a..9b2323e49 100644 --- a/keras_core/optimizers/base_optimizer.py +++ b/keras/optimizers/base_optimizer.py @@ -1,13 +1,13 @@ import re import warnings -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.optimizers.schedules import learning_rate_schedule -from keras_core.saving import serialization_lib -from keras_core.utils import tracking -from keras_core.utils.naming import auto_name +from keras import backend +from keras import initializers +from keras import ops +from keras.optimizers.schedules import learning_rate_schedule +from keras.saving import serialization_lib +from keras.utils import tracking +from keras.utils.naming import auto_name class BaseOptimizer: @@ -688,7 +688,7 @@ 
base_optimizer_keyword_args = """name: String. The name to use be multiplied the loss before computing gradients, and the inverse of the scale factor will be multiplied by the gradients before updating variables. Useful for preventing underflow during mixed precision - training. Alternately, `keras_core.optimizers.LossScaleOptimizer` will + training. Alternately, `keras.optimizers.LossScaleOptimizer` will automatically set a loss scale factor. """ diff --git a/keras_core/optimizers/ftrl.py b/keras/optimizers/ftrl.py similarity index 96% rename from keras_core/optimizers/ftrl.py rename to keras/optimizers/ftrl.py index 10933420d..3743ff08d 100644 --- a/keras_core/optimizers/ftrl.py +++ b/keras/optimizers/ftrl.py @@ -1,10 +1,10 @@ -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Ftrl"]) +@keras_export(["keras.optimizers.Ftrl"]) class Ftrl(optimizer.Optimizer): r"""Optimizer that implements the FTRL algorithm. @@ -53,7 +53,7 @@ class Ftrl(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. learning_rate_power: A float value, must be less or equal to zero. 
diff --git a/keras_core/optimizers/ftrl_test.py b/keras/optimizers/ftrl_test.py similarity index 95% rename from keras_core/optimizers/ftrl_test.py rename to keras/optimizers/ftrl_test.py index 1fd9975ab..c1688b8c2 100644 --- a/keras_core/optimizers/ftrl_test.py +++ b/keras/optimizers/ftrl_test.py @@ -3,9 +3,9 @@ import numpy as np -from keras_core import backend -from keras_core import testing -from keras_core.optimizers.ftrl import Ftrl +from keras import backend +from keras import testing +from keras.optimizers.ftrl import Ftrl class FtrlTest(testing.TestCase): diff --git a/keras_core/optimizers/lion.py b/keras/optimizers/lion.py similarity index 94% rename from keras_core/optimizers/lion.py rename to keras/optimizers/lion.py index 22c6f1b4d..c65e108cd 100644 --- a/keras_core/optimizers/lion.py +++ b/keras/optimizers/lion.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Lion"]) +@keras_export(["keras.optimizers.Lion"]) class Lion(optimizer.Optimizer): """Optimizer that implements the Lion algorithm. @@ -20,7 +20,7 @@ class Lion(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. 
beta_1: A float value or a constant float tensor, or a callable diff --git a/keras_core/optimizers/lion_test.py b/keras/optimizers/lion_test.py similarity index 89% rename from keras_core/optimizers/lion_test.py rename to keras/optimizers/lion_test.py index a31509103..b9cf8f126 100644 --- a/keras_core/optimizers/lion_test.py +++ b/keras/optimizers/lion_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -import keras_core -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.lion import Lion +import keras +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.lion import Lion class LionTest(testing.TestCase): @@ -78,8 +78,8 @@ class LionTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_ema(self): # TODO: test correctness - model = keras_core.Sequential([keras_core.layers.Dense(10)]) + model = keras.Sequential([keras.layers.Dense(10)]) model.compile(optimizer=Lion(use_ema=True), loss="mse") - x = keras_core.ops.zeros((1, 5)) - y = keras_core.ops.zeros((1, 10)) + x = keras.ops.zeros((1, 5)) + y = keras.ops.zeros((1, 10)) model.fit(x, y) diff --git a/keras_core/optimizers/loss_scale_optimizer.py b/keras/optimizers/loss_scale_optimizer.py similarity index 94% rename from keras_core/optimizers/loss_scale_optimizer.py rename to keras/optimizers/loss_scale_optimizer.py index 571e1de10..9646c0187 100644 --- a/keras_core/optimizers/loss_scale_optimizer.py +++ b/keras/optimizers/loss_scale_optimizer.py @@ -1,16 +1,16 @@ -from keras_core import backend -from keras_core import initializers -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer -from keras_core.saving import serialization_lib -from keras_core.utils import tracking +from keras import backend +from keras import initializers +from keras import ops +from keras.api_export import keras_export +from 
keras.optimizers import optimizer +from keras.saving import serialization_lib +from keras.utils import tracking -@keras_core_export( +@keras_export( [ - "keras_core.optimizers.LossScaleOptimizer", - "keras_core.mixed_precision.LossScaleOptimizer", + "keras.optimizers.LossScaleOptimizer", + "keras.mixed_precision.LossScaleOptimizer", ] ) class LossScaleOptimizer(optimizer.Optimizer): @@ -32,7 +32,7 @@ class LossScaleOptimizer(optimizer.Optimizer): is doubled. Args: - inner_optimizer: The `keras_core.optimizers.Optimizer` instance to wrap. + inner_optimizer: The `keras.optimizers.Optimizer` instance to wrap. initial_scale: Float. The initial loss scale. This scale will be updated during training. It is recommended for this to be a very high number, because a loss scale that is too high gets lowered far more diff --git a/keras_core/optimizers/loss_scale_optimizer_test.py b/keras/optimizers/loss_scale_optimizer_test.py similarity index 95% rename from keras_core/optimizers/loss_scale_optimizer_test.py rename to keras/optimizers/loss_scale_optimizer_test.py index 9bce63d06..d5bc3ba07 100644 --- a/keras_core/optimizers/loss_scale_optimizer_test.py +++ b/keras/optimizers/loss_scale_optimizer_test.py @@ -1,11 +1,11 @@ import numpy as np from absl.testing import parameterized -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras_core.optimizers.sgd import SGD +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.optimizers.sgd import SGD class LossScaleOptimizerTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/optimizers/nadam.py b/keras/optimizers/nadam.py similarity index 94% rename from keras_core/optimizers/nadam.py rename to keras/optimizers/nadam.py index 83d8ee194..2f1b9f61b 100644 --- a/keras_core/optimizers/nadam.py 
+++ b/keras/optimizers/nadam.py @@ -1,10 +1,10 @@ -from keras_core import backend -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import backend +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export(["keras_core.optimizers.Nadam"]) +@keras_export(["keras.optimizers.Nadam"]) class Nadam(optimizer.Optimizer): """Optimizer that implements the Nadam algorithm. @@ -13,7 +13,7 @@ class Nadam(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. beta_1: A float value or a constant float tensor, or a callable diff --git a/keras_core/optimizers/nadam_test.py b/keras/optimizers/nadam_test.py similarity index 95% rename from keras_core/optimizers/nadam_test.py rename to keras/optimizers/nadam_test.py index ac7c1b3b7..c50d070b8 100644 --- a/keras_core/optimizers/nadam_test.py +++ b/keras/optimizers/nadam_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.nadam import Nadam +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.nadam import Nadam class NadamTest(testing.TestCase): diff --git a/keras_core/optimizers/optimizer.py b/keras/optimizers/optimizer.py similarity index 52% rename from keras_core/optimizers/optimizer.py rename to keras/optimizers/optimizer.py index 643229815..c35a3b20a 100644 --- a/keras_core/optimizers/optimizer.py +++ b/keras/optimizers/optimizer.py @@ -1,20 +1,20 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.optimizers import 
base_optimizer +from keras import backend +from keras.api_export import keras_export +from keras.optimizers import base_optimizer if backend.backend() == "tensorflow": - from keras_core.backend.tensorflow.optimizer import TFOptimizer + from keras.backend.tensorflow.optimizer import TFOptimizer BackendOptimizer = TFOptimizer elif backend.backend() == "torch": - from keras_core.backend.torch.optimizers import TorchOptimizer + from keras.backend.torch.optimizers import TorchOptimizer BackendOptimizer = TorchOptimizer else: BackendOptimizer = base_optimizer.BaseOptimizer -@keras_core_export(["keras_core.Optimizer", "keras_core.optimizers.Optimizer"]) +@keras_export(["keras.Optimizer", "keras.optimizers.Optimizer"]) class Optimizer(BackendOptimizer): pass diff --git a/keras_core/optimizers/optimizer_test.py b/keras/optimizers/optimizer_test.py similarity index 93% rename from keras_core/optimizers/optimizer_test.py rename to keras/optimizers/optimizer_test.py index 5ffef05a3..7005565b1 100644 --- a/keras_core/optimizers/optimizer_test.py +++ b/keras/optimizers/optimizer_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import backend -from keras_core import constraints -from keras_core import optimizers -from keras_core import testing +from keras import backend +from keras import constraints +from keras import optimizers +from keras import testing class OptimizerTest(testing.TestCase): diff --git a/keras_core/optimizers/rmsprop.py b/keras/optimizers/rmsprop.py similarity index 93% rename from keras_core/optimizers/rmsprop.py rename to keras/optimizers/rmsprop.py index 11cbcd063..2d518b08e 100644 --- a/keras_core/optimizers/rmsprop.py +++ b/keras/optimizers/rmsprop.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer 
-@keras_core_export(["keras_core.optimizers.RMSprop"]) +@keras_export(["keras.optimizers.RMSprop"]) class RMSprop(optimizer.Optimizer): """Optimizer that implements the RMSprop algorithm. @@ -19,7 +19,7 @@ class RMSprop(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. rho: float, defaults to 0.9. Discounting factor for the old gradients. @@ -37,8 +37,8 @@ class RMSprop(optimizer.Optimizer): Usage: - >>> opt = keras_core.optimizers.RMSprop(learning_rate=0.1) - >>> var1 = keras_core.backend.Variable(10.0) + >>> opt = keras.optimizers.RMSprop(learning_rate=0.1) + >>> var1 = keras.backend.Variable(10.0) >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1 >>> opt.minimize(loss, [var1]) >>> var1 diff --git a/keras_core/optimizers/rmsprop_test.py b/keras/optimizers/rmsprop_test.py similarity index 94% rename from keras_core/optimizers/rmsprop_test.py rename to keras/optimizers/rmsprop_test.py index 444fe834e..862e0b868 100644 --- a/keras_core/optimizers/rmsprop_test.py +++ b/keras/optimizers/rmsprop_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.rmsprop import RMSprop +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.rmsprop import RMSprop class RMSpropTest(testing.TestCase): diff --git a/keras/optimizers/schedules/__init__.py b/keras/optimizers/schedules/__init__.py new file mode 100644 index 000000000..af8fb1cd7 --- /dev/null +++ b/keras/optimizers/schedules/__init__.py @@ -0,0 +1,16 @@ +from keras.optimizers.schedules.learning_rate_schedule import CosineDecay +from keras.optimizers.schedules.learning_rate_schedule import ( + CosineDecayRestarts, 
+) +from keras.optimizers.schedules.learning_rate_schedule import ( + ExponentialDecay, +) +from keras.optimizers.schedules.learning_rate_schedule import ( + InverseTimeDecay, +) +from keras.optimizers.schedules.learning_rate_schedule import ( + PiecewiseConstantDecay, +) +from keras.optimizers.schedules.learning_rate_schedule import ( + PolynomialDecay, +) diff --git a/keras_core/optimizers/schedules/learning_rate_schedule.py b/keras/optimizers/schedules/learning_rate_schedule.py similarity index 90% rename from keras_core/optimizers/schedules/learning_rate_schedule.py rename to keras/optimizers/schedules/learning_rate_schedule.py index 6267aeb5e..5f5845bbd 100644 --- a/keras_core/optimizers/schedules/learning_rate_schedule.py +++ b/keras/optimizers/schedules/learning_rate_schedule.py @@ -2,12 +2,12 @@ import math -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.saving import serialization_lib +from keras import ops +from keras.api_export import keras_export +from keras.saving import serialization_lib -@keras_core_export("keras_core.optimizers.schedules.LearningRateSchedule") +@keras_export("keras.optimizers.schedules.LearningRateSchedule") class LearningRateSchedule: """The learning rate schedule base class. @@ -15,15 +15,15 @@ class LearningRateSchedule: of your optimizer changes over time. 
Several built-in learning rate schedules are available, such as - `keras_core.optimizers.schedules.ExponentialDecay` or - `keras_core.optimizers.schedules.PiecewiseConstantDecay`: + `keras.optimizers.schedules.ExponentialDecay` or + `keras.optimizers.schedules.PiecewiseConstantDecay`: ```python - lr_schedule = keras_core.optimizers.schedules.ExponentialDecay( + lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.9) - optimizer = keras_core.optimizers.SGD(learning_rate=lr_schedule) + optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) ``` A `LearningRateSchedule` instance can be passed in as the `learning_rate` @@ -39,7 +39,7 @@ class LearningRateSchedule: Example: ```python - class MyLRSchedule(keras_core.optimizers.schedules.LearningRateSchedule): + class MyLRSchedule(keras.optimizers.schedules.LearningRateSchedule): def __init__(self, initial_learning_rate): self.initial_learning_rate = initial_learning_rate @@ -47,7 +47,7 @@ class LearningRateSchedule: def __call__(self, step): return self.initial_learning_rate / (step + 1) - optimizer = keras_core.optimizers.SGD(learning_rate=MyLRSchedule(0.1)) + optimizer = keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1)) ``` """ @@ -76,7 +76,7 @@ class LearningRateSchedule: return cls(**config) -@keras_core_export("keras_core.optimizers.schedules.ExponentialDecay") +@keras_export("keras.optimizers.schedules.ExponentialDecay") class ExponentialDecay(LearningRateSchedule): """A `LearningRateSchedule` that uses an exponential decay schedule. @@ -98,20 +98,20 @@ class ExponentialDecay(LearningRateSchedule): an integer division and the decayed learning rate follows a staircase function. - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. 
Example: When fitting a Keras model, decay every 100000 steps with a base of 0.96: ```python initial_learning_rate = 0.1 - lr_schedule = keras_core.optimizers.schedules.ExponentialDecay( + lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True) - model.compile(optimizer=keras_core.optimizers.SGD(learning_rate=lr_schedule), + model.compile(optimizer=keras.optimizers.SGD(learning_rate=lr_schedule), loss='sparse_categorical_crossentropy', metrics=['accuracy']) @@ -119,8 +119,8 @@ class ExponentialDecay(LearningRateSchedule): ``` The learning rate schedule is also serializable and deserializable using - `keras_core.optimizers.schedules.serialize` and - `keras_core.optimizers.schedules.deserialize`. + `keras.optimizers.schedules.serialize` and + `keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A Python float. The initial learning rate. @@ -178,7 +178,7 @@ class ExponentialDecay(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.PiecewiseConstantDecay") +@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay") class PiecewiseConstantDecay(LearningRateSchedule): """A `LearningRateSchedule` that uses a piecewise constant decay schedule. @@ -193,17 +193,17 @@ class PiecewiseConstantDecay(LearningRateSchedule): step = ops.array(0) boundaries = [100000, 110000] values = [1.0, 0.5, 0.1] - learning_rate_fn = keras_core.optimizers.schedules.PiecewiseConstantDecay( + learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay( boundaries, values) # Later, whenever we perform an optimization step, we pass in the step. learning_rate = learning_rate_fn(step) ``` - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. 
The learning rate schedule is also serializable and - deserializable using `keras_core.optimizers.schedules.serialize` and - `keras_core.optimizers.schedules.deserialize`. + deserializable using `keras.optimizers.schedules.serialize` and + `keras.optimizers.schedules.deserialize`. Args: boundaries: A list of Python numbers with strictly increasing @@ -295,7 +295,7 @@ class PiecewiseConstantDecay(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.PolynomialDecay") +@keras_export("keras.optimizers.schedules.PolynomialDecay") class PolynomialDecay(LearningRateSchedule): """A `LearningRateSchedule` that uses a polynomial decay schedule. @@ -333,7 +333,7 @@ class PolynomialDecay(LearningRateSchedule): ) + end_learning_rate ``` - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5): @@ -343,13 +343,13 @@ class PolynomialDecay(LearningRateSchedule): starter_learning_rate = 0.1 end_learning_rate = 0.01 decay_steps = 10000 - learning_rate_fn = keras_core.optimizers.schedules.PolynomialDecay( + learning_rate_fn = keras.optimizers.schedules.PolynomialDecay( starter_learning_rate, decay_steps, end_learning_rate, power=0.5) - model.compile(optimizer=keras_core.optimizers.SGD( + model.compile(optimizer=keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) @@ -358,8 +358,8 @@ class PolynomialDecay(LearningRateSchedule): ``` The learning rate schedule is also serializable and deserializable using - `keras_core.optimizers.schedules.serialize` and - `keras_core.optimizers.schedules.deserialize`. + `keras.optimizers.schedules.serialize` and + `keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A Python float. The initial learning rate. 
@@ -445,7 +445,7 @@ class PolynomialDecay(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.InverseTimeDecay") +@keras_export("keras.optimizers.schedules.InverseTimeDecay") class InverseTimeDecay(LearningRateSchedule): """A `LearningRateSchedule` that uses an inverse time decay schedule. @@ -473,7 +473,7 @@ class InverseTimeDecay(LearningRateSchedule): (1 + decay_rate * floor(step / decay_step)) ``` - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. Example: Fit a Keras model when decaying 1/t with a rate of 0.5: @@ -482,10 +482,10 @@ class InverseTimeDecay(LearningRateSchedule): initial_learning_rate = 0.1 decay_steps = 1.0 decay_rate = 0.5 - learning_rate_fn = keras_core.optimizers.schedules.InverseTimeDecay( + learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay( initial_learning_rate, decay_steps, decay_rate) - model.compile(optimizer=keras_core.optimizers.SGD( + model.compile(optimizer=keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) @@ -551,7 +551,7 @@ class InverseTimeDecay(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.CosineDecay") +@keras_export("keras.optimizers.schedules.CosineDecay") class CosineDecay(LearningRateSchedule): """A `LearningRateSchedule` that uses a cosine decay with optional warmup. 
@@ -606,7 +606,7 @@ class CosineDecay(LearningRateSchedule): ```python decay_steps = 1000 initial_learning_rate = 0.1 - lr_decayed_fn = keras_core.optimizers.schedules.CosineDecay( + lr_decayed_fn = keras.optimizers.schedules.CosineDecay( initial_learning_rate, decay_steps) ``` @@ -617,16 +617,16 @@ class CosineDecay(LearningRateSchedule): initial_learning_rate = 0 warmup_steps = 1000 target_learning_rate = 0.1 - lr_warmup_decayed_fn = keras_core.optimizers.schedules.CosineDecay( + lr_warmup_decayed_fn = keras.optimizers.schedules.CosineDecay( initial_learning_rate, decay_steps, warmup_target=target_learning_rate, warmup_steps=warmup_steps ) ``` - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and - deserializable using `keras_core.optimizers.schedules.serialize` and - `keras_core.optimizers.schedules.deserialize`. + deserializable using `keras.optimizers.schedules.serialize` and + `keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A Python float. The initial learning rate. @@ -736,7 +736,7 @@ class CosineDecay(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.CosineDecayRestarts") +@keras_export("keras.optimizers.schedules.CosineDecayRestarts") class CosineDecayRestarts(LearningRateSchedule): """A `LearningRateSchedule` that uses a cosine decay schedule with restarts. @@ -762,15 +762,15 @@ class CosineDecayRestarts(LearningRateSchedule): ```python first_decay_steps = 1000 lr_decayed_fn = ( - keras_core.optimizers.schedules.CosineDecayRestarts( + keras.optimizers.schedules.CosineDecayRestarts( initial_learning_rate, first_decay_steps)) ``` - You can pass this schedule directly into a `keras_core.optimizers.Optimizer` + You can pass this schedule directly into a `keras.optimizers.Optimizer` as the learning rate. 
The learning rate schedule is also serializable and - deserializable using `keras_core.optimizers.schedules.serialize` and - `keras_core.optimizers.schedules.deserialize`. + deserializable using `keras.optimizers.schedules.serialize` and + `keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A Python float. The initial learning rate. @@ -873,7 +873,7 @@ class CosineDecayRestarts(LearningRateSchedule): } -@keras_core_export("keras_core.optimizers.schedules.serialize") +@keras_export("keras.optimizers.schedules.serialize") def serialize(learning_rate_schedule): """Serializes a `LearningRateSchedule` into a JSON-compatible dict. @@ -885,17 +885,17 @@ def serialize(learning_rate_schedule): Example: - >>> lr_schedule = keras_core.optimizers.schedules.ExponentialDecay( + >>> lr_schedule = keras.optimizers.schedules.ExponentialDecay( ... 0.1, decay_steps=100000, decay_rate=0.96, staircase=True) - >>> keras_core.optimizers.schedules.serialize(lr_schedule) - {'module': 'keras_core.optimizers.schedules', + >>> keras.optimizers.schedules.serialize(lr_schedule) + {'module': 'keras.optimizers.schedules', 'class_name': 'ExponentialDecay', 'config': {...}, 'registered_name': None} """ return serialization_lib.serialize_keras_object(learning_rate_schedule) -@keras_core_export("keras_core.optimizers.schedules.deserialize") +@keras_export("keras.optimizers.schedules.deserialize") def deserialize(config, custom_objects=None): """Instantiates a `LearningRateSchedule` object from a serialized form. 
@@ -922,7 +922,7 @@ def deserialize(config, custom_objects=None): 'power': 0.5 } } - lr_schedule = keras_core.optimizers.schedules.deserialize(config) + lr_schedule = keras.optimizers.schedules.deserialize(config) ``` """ return serialization_lib.deserialize_keras_object( diff --git a/keras_core/optimizers/schedules/learning_rate_schedule_test.py b/keras/optimizers/schedules/learning_rate_schedule_test.py similarity index 98% rename from keras_core/optimizers/schedules/learning_rate_schedule_test.py rename to keras/optimizers/schedules/learning_rate_schedule_test.py index dd9388d6f..34d51b5d3 100644 --- a/keras_core/optimizers/schedules/learning_rate_schedule_test.py +++ b/keras/optimizers/schedules/learning_rate_schedule_test.py @@ -5,12 +5,12 @@ import math import numpy as np import pytest -from keras_core import backend -from keras_core import layers -from keras_core import optimizers -from keras_core import testing -from keras_core.models import Sequential -from keras_core.optimizers import schedules +from keras import backend +from keras import layers +from keras import optimizers +from keras import testing +from keras.models import Sequential +from keras.optimizers import schedules class TestFitLRSchedulesFlow(testing.TestCase): diff --git a/keras_core/optimizers/sgd.py b/keras/optimizers/sgd.py similarity index 93% rename from keras_core/optimizers/sgd.py rename to keras/optimizers/sgd.py index aae7f32df..d60c2efaf 100644 --- a/keras_core/optimizers/sgd.py +++ b/keras/optimizers/sgd.py @@ -1,9 +1,9 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.optimizers import optimizer +from keras import ops +from keras.api_export import keras_export +from keras.optimizers import optimizer -@keras_core_export("keras_core.optimizers.SGD") +@keras_export("keras.optimizers.SGD") class SGD(optimizer.Optimizer): """Gradient descent (with momentum) optimizer. 
@@ -29,7 +29,7 @@ class SGD(optimizer.Optimizer): Args: learning_rate: A float, a - `keras_core.optimizers.schedules.LearningRateSchedule` instance, or + `keras.optimizers.schedules.LearningRateSchedule` instance, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.01`. momentum: float hyperparameter >= 0 that accelerates gradient descent in diff --git a/keras_core/optimizers/sgd_test.py b/keras/optimizers/sgd_test.py similarity index 96% rename from keras_core/optimizers/sgd_test.py rename to keras/optimizers/sgd_test.py index 191ed3c1d..b8eaeaff7 100644 --- a/keras_core/optimizers/sgd_test.py +++ b/keras/optimizers/sgd_test.py @@ -2,10 +2,10 @@ import numpy as np -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.optimizers.sgd import SGD +from keras import backend +from keras import ops +from keras import testing +from keras.optimizers.sgd import SGD class SGDTest(testing.TestCase): diff --git a/keras_core/random/__init__.py b/keras/random/__init__.py similarity index 100% rename from keras_core/random/__init__.py rename to keras/random/__init__.py diff --git a/keras_core/random/random.py b/keras/random/random.py similarity index 83% rename from keras_core/random/random.py rename to keras/random/random.py index 08579d5e0..a4d45080c 100644 --- a/keras_core/random/random.py +++ b/keras/random/random.py @@ -1,8 +1,8 @@ -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export -@keras_core_export("keras_core.random.normal") +@keras_export("keras.random.normal") def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Draw random samples from a normal (Gaussian) distribution. @@ -12,24 +12,24 @@ def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): stddev: Floats, defaults to 1. Standard deviation of the random values to generate. 
dtype: Optional dtype of the tensor. Only floating point types are - supported. If not specified, `keras_core.config.floatx()` is used, + supported. If not specified, `keras.config.floatx()` is used, which defaults to `float32` unless you configured it otherwise (via - `keras_core.config.set_floatx(float_dtype)`). + `keras.config.set_floatx(float_dtype)`). seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. """ return backend.random.normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed ) -@keras_core_export("keras_core.random.categorical") +@keras_export("keras.random.categorical") def categorical(logits, num_samples, dtype="int32", seed=None): """Draws samples from a categorical distribution. @@ -52,13 +52,13 @@ def categorical(logits, num_samples, dtype="int32", seed=None): tensor's shape. dtype: Optional dtype of the output tensor. seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. Returns: A 2-D tensor with (batch_size, num_samples). 
@@ -74,7 +74,7 @@ def categorical(logits, num_samples, dtype="int32", seed=None): ) -@keras_core_export("keras_core.random.uniform") +@keras_export("keras.random.uniform") def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """Draw samples from a uniform distribution. @@ -91,21 +91,21 @@ def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): maxval: Floats, defaults to 1. Upper bound of the range of random values to generate (exclusive). dtype: Optional dtype of the tensor. Only floating point types are - supported. If not specified, `keras_core.config.floatx()` is used, + supported. If not specified, `keras.config.floatx()` is used, which defaults to `float32` unless you configured it otherwise (via - `keras_core.config.set_floatx(float_dtype)`) + `keras.config.set_floatx(float_dtype)`) seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. """ if dtype and not backend.is_float_dtype(dtype): raise ValueError( - "`keras_core.random.uniform` requires a floating point `dtype`. " + "`keras.random.uniform` requires a floating point `dtype`. " f"Received: dtype={dtype} " ) return backend.random.uniform( @@ -113,7 +113,7 @@ def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): ) -@keras_core_export("keras_core.random.randint") +@keras_export("keras.random.randint") def randint(shape, minval, maxval, dtype="int32", seed=None): """Draw random integers from a uniform distribution. @@ -130,21 +130,21 @@ def randint(shape, minval, maxval, dtype="int32", seed=None): maxval: Floats, defaults to 1. 
Upper bound of the range of random values to generate (exclusive). dtype: Optional dtype of the tensor. Only integer types are - supported. If not specified, `keras_core.config.floatx()` is used, + supported. If not specified, `keras.config.floatx()` is used, which defaults to `float32` unless you configured it otherwise (via - `keras_core.config.set_floatx(float_dtype)`) + `keras.config.set_floatx(float_dtype)`) seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. """ if dtype and not backend.is_int_dtype(dtype): raise ValueError( - "`keras_core.random.randint` requires an integer `dtype`. " + "`keras.random.randint` requires an integer `dtype`. " f"Received: dtype={dtype} " ) return backend.random.randint( @@ -152,7 +152,7 @@ def randint(shape, minval, maxval, dtype="int32", seed=None): ) -@keras_core_export("keras_core.random.truncated_normal") +@keras_export("keras.random.truncated_normal") def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Draw samples from a truncated normal distribution. @@ -170,27 +170,27 @@ def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): which defaults to `float32` unless you configured it otherwise (via `keras.config.set_floatx(float_dtype)`) seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. 
To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. """ return backend.random.truncated_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed ) -@keras_core_export("keras_core.random.dropout") +@keras_export("keras.random.dropout") def dropout(inputs, rate, noise_shape=None, seed=None): return backend.random.dropout( inputs, rate, noise_shape=noise_shape, seed=seed ) -@keras_core_export("keras_core.random.shuffle") +@keras_export("keras.random.shuffle") def shuffle(x, axis=0, seed=None): """Shuffle the elements of a tensor uniformly at random along an axis. @@ -199,12 +199,12 @@ def shuffle(x, axis=0, seed=None): axis: An integer specifying the axis along which to shuffle. Defaults to `0`. seed: A Python integer or instance of - `keras_core.random.SeedGenerator`. + `keras.random.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or None (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance - of `keras_core.random.SeedGenerator`. + of `keras.random.SeedGenerator`. 
""" return backend.random.shuffle(x, axis=axis, seed=seed) diff --git a/keras_core/random/random_test.py b/keras/random/random_test.py similarity index 94% rename from keras_core/random/random_test.py rename to keras/random/random_test.py index f3c46582b..46c47cd40 100644 --- a/keras_core/random/random_test.py +++ b/keras/random/random_test.py @@ -2,12 +2,12 @@ import numpy as np import pytest from absl.testing import parameterized -import keras_core -from keras_core import backend -from keras_core import ops -from keras_core import testing -from keras_core.random import random -from keras_core.random import seed_generator +import keras +from keras import backend +from keras import ops +from keras import testing +from keras.random import random +from keras.random import seed_generator class RandomTest(testing.TestCase, parameterized.TestCase): @@ -83,7 +83,7 @@ class RandomTest(testing.TestCase, parameterized.TestCase): self.assertLessEqual(ops.max(res), max) self.assertGreaterEqual(ops.max(res), min) # Torch has incomplete dtype support for uints; will remap some dtypes. 
- if keras_core.backend.backend() != "torch": + if keras.backend.backend() != "torch": self.assertEqual(backend.standardize_dtype(res.dtype), dtype) @parameterized.parameters( @@ -114,7 +114,7 @@ class RandomTest(testing.TestCase, parameterized.TestCase): self.assertGreater(ops.sum(x_res == 0), 2) @pytest.mark.skipif( - keras_core.backend.backend() != "jax", + keras.backend.backend() != "jax", reason="This test requires `jax` as the backend.", ) def test_dropout_jax_jit_stateless(self): @@ -125,8 +125,8 @@ class RandomTest(testing.TestCase, parameterized.TestCase): @jax.jit def train_step(x): - with keras_core.backend.StatelessScope(): - x = keras_core.layers.Dropout(rate=0.1)(x, training=True) + with keras.backend.StatelessScope(): + x = keras.layers.Dropout(rate=0.1)(x, training=True) return x x = train_step(x) @@ -140,7 +140,7 @@ class RandomTest(testing.TestCase, parameterized.TestCase): self.assertEqual(x.shape, (2, 3, 5, 7)) @pytest.mark.skipif( - keras_core.backend.backend() != "jax", + keras.backend.backend() != "jax", reason="This test requires `jax` as the backend.", ) def test_jax_rngkey_seed(self): @@ -155,7 +155,7 @@ class RandomTest(testing.TestCase, parameterized.TestCase): self.assertIsInstance(x, jnp.ndarray) @pytest.mark.skipif( - keras_core.backend.backend() != "jax", + keras.backend.backend() != "jax", reason="This test requires `jax` as the backend.", ) def test_jax_unseed_disallowed_during_tracing(self): diff --git a/keras_core/random/seed_generator.py b/keras/random/seed_generator.py similarity index 83% rename from keras_core/random/seed_generator.py rename to keras/random/seed_generator.py index b3d0d0db7..2f58617b5 100644 --- a/keras_core/random/seed_generator.py +++ b/keras/random/seed_generator.py @@ -2,17 +2,17 @@ import random as python_random import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state -from keras_core.utils import jax_utils 
+from keras import backend +from keras.api_export import keras_export +from keras.backend.common import global_state +from keras.utils import jax_utils -@keras_core_export("keras_core.random.SeedGenerator") +@keras_export("keras.random.SeedGenerator") class SeedGenerator: """Generates variable seeds upon each call to a RNG-using function. - In Keras, all RNG-using methods (such as `keras_core.random.normal()`) + In Keras, all RNG-using methods (such as `keras.random.normal()`) are stateless, meaning that if you pass an integer seed to them (such as `seed=42`), they will return the same values at each call. In order to get different values at each call, you must use a @@ -22,22 +22,22 @@ class SeedGenerator: Example: ```python - seed_gen = keras_core.random.SeedGenerator(seed=42) - values = keras_core.random.normal(shape=(2, 3), seed=seed_gen) - new_values = keras_core.random.normal(shape=(2, 3), seed=seed_gen) + seed_gen = keras.random.SeedGenerator(seed=42) + values = keras.random.normal(shape=(2, 3), seed=seed_gen) + new_values = keras.random.normal(shape=(2, 3), seed=seed_gen) ``` Usage in a layer: ```python - class Dropout(keras_core.Layer): + class Dropout(keras.Layer): def __init__(self, **kwargs): super().__init__(**kwargs) - self.seed_generator = keras_core.random.SeedGenerator(1337) + self.seed_generator = keras.random.SeedGenerator(1337) def call(self, x, training=False): if training: - return keras_core.random.dropout( + return keras.random.dropout( x, rate=0.5, seed=self.seed_generator ) return x @@ -113,7 +113,7 @@ def make_default_seed(): def draw_seed(seed): - from keras_core.backend import convert_to_tensor + from keras.backend import convert_to_tensor if isinstance(seed, SeedGenerator): return seed.next() diff --git a/keras_core/regularizers/__init__.py b/keras/regularizers/__init__.py similarity index 66% rename from keras_core/regularizers/__init__.py rename to keras/regularizers/__init__.py index 963d073bf..aea31e9fb 100644 --- 
a/keras_core/regularizers/__init__.py +++ b/keras/regularizers/__init__.py @@ -1,13 +1,13 @@ import inspect -from keras_core.api_export import keras_core_export -from keras_core.regularizers.regularizers import L1 -from keras_core.regularizers.regularizers import L1L2 -from keras_core.regularizers.regularizers import L2 -from keras_core.regularizers.regularizers import OrthogonalRegularizer -from keras_core.regularizers.regularizers import Regularizer -from keras_core.saving import serialization_lib -from keras_core.utils.naming import to_snake_case +from keras.api_export import keras_export +from keras.regularizers.regularizers import L1 +from keras.regularizers.regularizers import L1L2 +from keras.regularizers.regularizers import L2 +from keras.regularizers.regularizers import OrthogonalRegularizer +from keras.regularizers.regularizers import Regularizer +from keras.saving import serialization_lib +from keras.utils.naming import to_snake_case ALL_OBJECTS = { Regularizer, @@ -23,12 +23,12 @@ ALL_OBJECTS_DICT.update( ) -@keras_core_export("keras_core.regularizers.serialize") +@keras_export("keras.regularizers.serialize") def serialize(initializer): return serialization_lib.serialize_keras_object(initializer) -@keras_core_export("keras_core.regularizers.deserialize") +@keras_export("keras.regularizers.deserialize") def deserialize(config, custom_objects=None): """Return a Keras regularizer object via its config.""" return serialization_lib.deserialize_keras_object( @@ -38,7 +38,7 @@ def deserialize(config, custom_objects=None): ) -@keras_core_export("keras_core.regularizers.get") +@keras_export("keras.regularizers.get") def get(identifier): """Retrieve a Keras regularizer object via an identifier.""" if identifier is None: diff --git a/keras_core/regularizers/regularizers.py b/keras/regularizers/regularizers.py similarity index 94% rename from keras_core/regularizers/regularizers.py rename to keras/regularizers/regularizers.py index 0b69402de..d1de26838 100644 --- 
a/keras_core/regularizers/regularizers.py +++ b/keras/regularizers/regularizers.py @@ -1,12 +1,12 @@ import math -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.utils.numerical_utils import normalize +from keras import ops +from keras.api_export import keras_export +from keras.utils.numerical_utils import normalize -@keras_core_export( - ["keras_core.Regularizer", "keras_core.regularizers.Regularizer"] +@keras_export( + ["keras.Regularizer", "keras.regularizers.Regularizer"] ) class Regularizer: """Regularizer base class. @@ -167,8 +167,8 @@ class Regularizer: raise NotImplementedError(f"{self} does not implement get_config()") -@keras_core_export( - ["keras_core.regularizers.L1L2", "keras_core.regularizers.l1_l2"] +@keras_export( + ["keras.regularizers.L1L2", "keras.regularizers.l1_l2"] ) class L1L2(Regularizer): """A regularizer that applies both L1 and L2 regularization penalties. @@ -214,7 +214,7 @@ class L1L2(Regularizer): return {"l1": float(self.l1), "l2": float(self.l2)} -@keras_core_export(["keras_core.regularizers.L1", "keras_core.regularizers.l1"]) +@keras_export(["keras.regularizers.L1", "keras.regularizers.l1"]) class L1(Regularizer): """A regularizer that applies a L1 regularization penalty. @@ -243,7 +243,7 @@ class L1(Regularizer): return {"l1": float(self.l1)} -@keras_core_export(["keras_core.regularizers.L2", "keras_core.regularizers.l2"]) +@keras_export(["keras.regularizers.L2", "keras.regularizers.l2"]) class L2(Regularizer): """A regularizer that applies a L2 regularization penalty. 
@@ -272,10 +272,10 @@ class L2(Regularizer): return {"l2": float(self.l2)} -@keras_core_export( +@keras_export( [ - "keras_core.regularizers.OrthogonalRegularizer", - "keras_core.regularizers.orthogonal_regularizer", + "keras.regularizers.OrthogonalRegularizer", + "keras.regularizers.orthogonal_regularizer", ] ) class OrthogonalRegularizer(Regularizer): diff --git a/keras_core/regularizers/regularizers_test.py b/keras/regularizers/regularizers_test.py similarity index 94% rename from keras_core/regularizers/regularizers_test.py rename to keras/regularizers/regularizers_test.py index e4c3fea9a..5e03473dd 100644 --- a/keras_core/regularizers/regularizers_test.py +++ b/keras/regularizers/regularizers_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras_core import backend -from keras_core import regularizers -from keras_core import testing +from keras import backend +from keras import regularizers +from keras import testing # TODO: serialization tests diff --git a/keras/saving/__init__.py b/keras/saving/__init__.py new file mode 100644 index 000000000..9f709a3c1 --- /dev/null +++ b/keras/saving/__init__.py @@ -0,0 +1,9 @@ +from keras.saving.object_registration import CustomObjectScope +from keras.saving.object_registration import custom_object_scope +from keras.saving.object_registration import get_custom_objects +from keras.saving.object_registration import get_registered_name +from keras.saving.object_registration import get_registered_object +from keras.saving.object_registration import register_keras_serializable +from keras.saving.saving_api import load_model +from keras.saving.serialization_lib import deserialize_keras_object +from keras.saving.serialization_lib import serialize_keras_object diff --git a/keras_core/saving/object_registration.py b/keras/saving/object_registration.py similarity index 87% rename from keras_core/saving/object_registration.py rename to keras/saving/object_registration.py index 589355966..09d1638ef 100644 --- 
a/keras_core/saving/object_registration.py +++ b/keras/saving/object_registration.py @@ -1,26 +1,26 @@ import inspect -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras.api_export import keras_export +from keras.backend.common import global_state GLOBAL_CUSTOM_OBJECTS = {} GLOBAL_CUSTOM_NAMES = {} -@keras_core_export( +@keras_export( [ - "keras_core.saving.CustomObjectScope", - "keras_core.saving.custom_object_scope", - "keras_core.utils.CustomObjectScope", - "keras_core.utils.custom_object_scope", + "keras.saving.CustomObjectScope", + "keras.saving.custom_object_scope", + "keras.utils.CustomObjectScope", + "keras.utils.custom_object_scope", ] ) class CustomObjectScope: """Exposes custom classes/functions to Keras deserialization internals. Under a scope `with custom_object_scope(objects_dict)`, Keras methods such - as `keras_core.models.load_model()` or - `keras_core.models.model_from_config()` will be able to deserialize any + as `keras.models.load_model()` or + `keras.models.model_from_config()` will be able to deserialize any custom object referenced by a saved config (e.g. a custom layer or metric). 
Example: @@ -64,10 +64,10 @@ class CustomObjectScope: custom_object_scope = CustomObjectScope -@keras_core_export( +@keras_export( [ - "keras_core.saving.get_custom_objects", - "keras_core.utils.get_custom_objects", + "keras.saving.get_custom_objects", + "keras.utils.get_custom_objects", ] ) def get_custom_objects(): @@ -90,10 +90,10 @@ def get_custom_objects(): return GLOBAL_CUSTOM_OBJECTS -@keras_core_export( +@keras_export( [ - "keras_core.saving.register_keras_serializable", - "keras_core.utils.register_keras_serializable", + "keras.saving.register_keras_serializable", + "keras.utils.register_keras_serializable", ] ) def register_keras_serializable(package="Custom", name=None): @@ -116,7 +116,7 @@ def register_keras_serializable(package="Custom", name=None): # Note that `'my_package'` is used as the `package` argument here, and since # the `name` argument is not provided, `'MyDense'` is used as the `name`. @register_keras_serializable('my_package') - class MyDense(keras_core.layers.Dense): + class MyDense(keras.layers.Dense): pass assert get_registered_object('my_package>MyDense') == MyDense @@ -155,10 +155,10 @@ def register_keras_serializable(package="Custom", name=None): return decorator -@keras_core_export( +@keras_export( [ - "keras_core.saving.get_registered_name", - "keras_core.utils.get_registered_name", + "keras.saving.get_registered_name", + "keras.utils.get_registered_name", ] ) def get_registered_name(obj): @@ -181,10 +181,10 @@ def get_registered_name(obj): return obj.__name__ -@keras_core_export( +@keras_export( [ - "keras_core.saving.get_registered_object", - "keras_core.utils.get_registered_object", + "keras.saving.get_registered_object", + "keras.utils.get_registered_object", ] ) def get_registered_object(name, custom_objects=None, module_objects=None): diff --git a/keras_core/saving/object_registration_test.py b/keras/saving/object_registration_test.py similarity index 84% rename from keras_core/saving/object_registration_test.py rename to 
keras/saving/object_registration_test.py index 56b5744d4..4da7606dd 100644 --- a/keras_core/saving/object_registration_test.py +++ b/keras/saving/object_registration_test.py @@ -1,7 +1,7 @@ -import keras_core -from keras_core import testing -from keras_core.saving import object_registration -from keras_core.saving import serialization_lib +import keras +from keras import testing +from keras.saving import object_registration +from keras.saving import serialization_lib class TestObjectRegistration(testing.TestCase): @@ -16,17 +16,17 @@ class TestObjectRegistration(testing.TestCase): with object_registration.custom_object_scope( {"CustomClass": CustomClass, "custom_fn": custom_fn} ): - actual_custom_fn = keras_core.activations.get("custom_fn") + actual_custom_fn = keras.activations.get("custom_fn") self.assertEqual(actual_custom_fn, custom_fn) - actual_custom_class = keras_core.regularizers.get("CustomClass") + actual_custom_class = keras.regularizers.get("CustomClass") self.assertEqual(actual_custom_class.__class__, CustomClass) with object_registration.custom_object_scope( {"CustomClass": CustomClass, "custom_fn": custom_fn} ): - actual_custom_fn = keras_core.activations.get("custom_fn") + actual_custom_fn = keras.activations.get("custom_fn") self.assertEqual(actual_custom_fn, custom_fn) - actual_custom_class = keras_core.regularizers.get("CustomClass") + actual_custom_class = keras.regularizers.get("CustomClass") self.assertEqual(actual_custom_class.__class__, CustomClass) checked_thread = self.checkedThread(check_get_in_thread) checked_thread.start() @@ -82,9 +82,9 @@ class TestObjectRegistration(testing.TestCase): cls = object_registration.get_registered_object(fn_class_name) self.assertEqual(OtherTestClass, cls) - config = keras_core.saving.serialize_keras_object(inst) + config = keras.saving.serialize_keras_object(inst) self.assertEqual("OtherTestClass", config["class_name"]) - new_inst = keras_core.saving.deserialize_keras_object(config) + new_inst = 
keras.saving.deserialize_keras_object(config) self.assertIsNot(inst, new_inst) self.assertIsInstance(new_inst, OtherTestClass) self.assertEqual(5, new_inst._val) @@ -100,8 +100,8 @@ class TestObjectRegistration(testing.TestCase): fn_class_name = object_registration.get_registered_name(my_fn) self.assertEqual(fn_class_name, class_name) - config = keras_core.saving.serialize_keras_object(my_fn) - fn = keras_core.saving.deserialize_keras_object(config) + config = keras.saving.serialize_keras_object(my_fn) + fn = keras.saving.deserialize_keras_object(config) self.assertEqual(42, fn()) fn_2 = object_registration.get_registered_object(fn_class_name) diff --git a/keras_core/saving/saving_api.py b/keras/saving/saving_api.py similarity index 86% rename from keras_core/saving/saving_api.py rename to keras/saving/saving_api.py index a4c910590..8f822adb9 100644 --- a/keras_core/saving/saving_api.py +++ b/keras/saving/saving_api.py @@ -3,11 +3,11 @@ import zipfile from absl import logging -from keras_core.api_export import keras_core_export -from keras_core.legacy.saving import legacy_h5_format -from keras_core.saving import saving_lib -from keras_core.utils import file_utils -from keras_core.utils import io_utils +from keras.api_export import keras_export +from keras.legacy.saving import legacy_h5_format +from keras.saving import saving_lib +from keras.utils import file_utils +from keras.utils import io_utils try: import h5py @@ -15,8 +15,8 @@ except ImportError: h5py = None -@keras_core_export( - ["keras_core.saving.save_model", "keras_core.models.save_model"] +@keras_export( + ["keras.saving.save_model", "keras.models.save_model"] ) def save_model(model, filepath, overwrite=True, **kwargs): """Saves a model as a `.keras` file. 
@@ -30,19 +30,19 @@ def save_model(model, filepath, overwrite=True, **kwargs): Example: ```python - model = keras_core.Sequential( + model = keras.Sequential( [ - keras_core.layers.Dense(5, input_shape=(3,)), - keras_core.layers.Softmax(), + keras.layers.Dense(5, input_shape=(3,)), + keras.layers.Softmax(), ], ) model.save("model.keras") - loaded_model = keras_core.saving.load_model("model.keras") + loaded_model = keras.saving.load_model("model.keras") x = keras.random.uniform((10, 3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ``` - Note that `model.save()` is an alias for `keras_core.saving.save_model()`. + Note that `model.save()` is an alias for `keras.saving.save_model()`. The saved `.keras` file contains: @@ -59,14 +59,14 @@ def save_model(model, filepath, overwrite=True, **kwargs): ".keras" ): logging.warning( - "The `save_format` argument is deprecated in Keras Core. " + "The `save_format` argument is deprecated in Keras 3. " "We recommend removing this argument as it can be inferred " "from the file path. " f"Received: save_format={save_format}" ) else: raise ValueError( - "The `save_format` argument is deprecated in Keras Core. " + "The `save_format` argument is deprecated in Keras 3. " "Please remove this argument and pass a file path with " "either `.keras` or `.h5` extension." f"Received: save_format={save_format}" @@ -112,8 +112,8 @@ def save_model(model, filepath, overwrite=True, **kwargs): ) -@keras_core_export( - ["keras_core.saving.load_model", "keras_core.models.load_model"] +@keras_export( + ["keras.saving.load_model", "keras.models.load_model"] ) def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): """Loads a model saved via `model.save()`. 
@@ -137,11 +137,11 @@ def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): Example: ```python - model = keras_core.Sequential([ - keras_core.layers.Dense(5, input_shape=(3,)), - keras_core.layers.Softmax()]) + model = keras.Sequential([ + keras.layers.Dense(5, input_shape=(3,)), + keras.layers.Softmax()]) model.save("model.keras") - loaded_model = keras_core.saving.load_model("model.keras") + loaded_model = keras.saving.load_model("model.keras") x = np.random.random((10, 3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ``` @@ -191,13 +191,13 @@ def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): else: raise ValueError( f"File format not supported: filepath={filepath}. " - "Keras Core only supports V3 `.keras` files and " + "Keras 3 only supports V3 `.keras` files and " "legacy H5 format files (`.h5` extension). " "Note that the legacy SavedModel format is not " - "supported by `load_model()` in Keras Core. In " + "supported by `load_model()` in Keras 3. In " "order to reload a TensorFlow SavedModel as an " - "inference-only layer in Keras Core, use " - "`keras_core.layers.TFSMLayer(" + "inference-only layer in Keras 3, use " + "`keras.layers.TFSMLayer(" f"{filepath}, call_endpoint='serving_default')` " "(note that your `call_endpoint` " "might have a different name)." @@ -237,6 +237,6 @@ def load_weights(model, filepath, skip_mismatch=False, **kwargs): else: raise ValueError( f"File format not supported: filepath={filepath}. " - "Keras Core only supports V3 `.keras` and `.weights.h5` " + "Keras 3 only supports V3 `.keras` and `.weights.h5` " "files, or legacy V1/V2 `.h5` files." 
) diff --git a/keras_core/saving/saving_api_test.py b/keras/saving/saving_api_test.py similarity index 97% rename from keras_core/saving/saving_api_test.py rename to keras/saving/saving_api_test.py index d31981a1e..f41de47e4 100644 --- a/keras_core/saving/saving_api_test.py +++ b/keras/saving/saving_api_test.py @@ -4,10 +4,10 @@ import unittest.mock as mock import numpy as np from absl import logging -from keras_core import layers -from keras_core.models import Sequential -from keras_core.saving import saving_api -from keras_core.testing import test_case +from keras import layers +from keras.models import Sequential +from keras.saving import saving_api +from keras.testing import test_case class SaveModelTests(test_case.TestCase): diff --git a/keras_core/saving/saving_lib.py b/keras/saving/saving_lib.py similarity index 96% rename from keras_core/saving/saving_lib.py rename to keras/saving/saving_lib.py index b9c4f786f..c8fcde56b 100644 --- a/keras_core/saving/saving_lib.py +++ b/keras/saving/saving_lib.py @@ -9,18 +9,18 @@ import zipfile import numpy as np -from keras_core.backend.common import global_state -from keras_core.layers.layer import Layer -from keras_core.losses.loss import Loss -from keras_core.metrics.metric import Metric -from keras_core.optimizers.optimizer import Optimizer -from keras_core.saving.serialization_lib import ObjectSharingScope -from keras_core.saving.serialization_lib import deserialize_keras_object -from keras_core.saving.serialization_lib import serialize_keras_object -from keras_core.trainers.compile_utils import CompileMetrics -from keras_core.utils import file_utils -from keras_core.utils import naming -from keras_core.version import __version__ as keras_version +from keras.backend.common import global_state +from keras.layers.layer import Layer +from keras.losses.loss import Loss +from keras.metrics.metric import Metric +from keras.optimizers.optimizer import Optimizer +from keras.saving.serialization_lib import ObjectSharingScope 
+from keras.saving.serialization_lib import deserialize_keras_object +from keras.saving.serialization_lib import serialize_keras_object +from keras.trainers.compile_utils import CompileMetrics +from keras.utils import file_utils +from keras.utils import naming +from keras.version import __version__ as keras_version try: import h5py @@ -253,8 +253,8 @@ def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path): def _walk_trackable(trackable): - from keras_core.models import Functional - from keras_core.models import Sequential + from keras.models import Functional + from keras.models import Sequential if isinstance(trackable, Sequential): obj_type = "Sequential" diff --git a/keras_core/saving/saving_lib_test.py b/keras/saving/saving_lib_test.py similarity index 85% rename from keras_core/saving/saving_lib_test.py rename to keras/saving/saving_lib_test.py index 06e0572eb..85e3f4fa8 100644 --- a/keras_core/saving/saving_lib_test.py +++ b/keras/saving/saving_lib_test.py @@ -9,18 +9,18 @@ from unittest import mock import numpy as np import pytest -import keras_core -from keras_core import ops -from keras_core import testing -from keras_core.saving import saving_lib +import keras +from keras import ops +from keras import testing +from keras.saving import saving_lib -@keras_core.saving.register_keras_serializable(package="my_custom_package") -class MyDense(keras_core.layers.Layer): +@keras.saving.register_keras_serializable(package="my_custom_package") +class MyDense(keras.layers.Layer): def __init__(self, units, **kwargs): super().__init__(**kwargs) self.units = units - self.nested_layer = keras_core.layers.Dense(self.units, name="dense") + self.nested_layer = keras.layers.Dense(self.units, name="dense") def build(self, input_shape): self.additional_weights = [ @@ -58,7 +58,7 @@ ASSETS_DATA = "These are my assets" VARIABLES_DATA = np.random.random((10,)) -@keras_core.saving.register_keras_serializable(package="my_custom_package") 
+@keras.saving.register_keras_serializable(package="my_custom_package") class LayerWithCustomSaving(MyDense): def build(self, input_shape): self.assets = ASSETS_DATA @@ -81,8 +81,8 @@ class LayerWithCustomSaving(MyDense): self.stored_variables = np.array(store["variables"]) -@keras_core.saving.register_keras_serializable(package="my_custom_package") -class CustomModelX(keras_core.Model): +@keras.saving.register_keras_serializable(package="my_custom_package") +class CustomModelX(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dense1 = MyDense(1, name="my_dense_1") @@ -96,8 +96,8 @@ class CustomModelX(keras_core.Model): return 1 -@keras_core.saving.register_keras_serializable(package="my_custom_package") -class ModelWithCustomSaving(keras_core.Model): +@keras.saving.register_keras_serializable(package="my_custom_package") +class ModelWithCustomSaving(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.custom_dense = LayerWithCustomSaving(1) @@ -106,8 +106,8 @@ class ModelWithCustomSaving(keras_core.Model): return self.custom_dense(inputs) -@keras_core.saving.register_keras_serializable(package="my_custom_package") -class CompileOverridingModel(keras_core.Model): +@keras.saving.register_keras_serializable(package="my_custom_package") +class CompileOverridingModel(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dense1 = MyDense(1) @@ -119,13 +119,13 @@ class CompileOverridingModel(keras_core.Model): return self.dense1(inputs) -@keras_core.saving.register_keras_serializable(package="my_custom_package") -class CompileOverridingSequential(keras_core.Sequential): +@keras.saving.register_keras_serializable(package="my_custom_package") +class CompileOverridingSequential(keras.Sequential): def compile(self, *args, **kwargs): super().compile(*args, **kwargs) -@keras_core.saving.register_keras_serializable(package="my_custom_package") 
+@keras.saving.register_keras_serializable(package="my_custom_package") def my_mean_squared_error(y_true, y_pred): """Identical to built-in `mean_squared_error`, but as a custom fn.""" return ops.mean(ops.square(y_pred - y_true), axis=-1) @@ -137,29 +137,29 @@ def _get_subclassed_model(compile=True): subclassed_model.compile( optimizer="adam", loss=my_mean_squared_error, - metrics=[keras_core.metrics.Hinge(), "mse"], + metrics=[keras.metrics.Hinge(), "mse"], ) return subclassed_model def _get_custom_sequential_model(compile=True): - sequential_model = keras_core.Sequential( + sequential_model = keras.Sequential( [MyDense(1), MyDense(1)], name="sequential" ) if compile: sequential_model.compile( optimizer="adam", loss=my_mean_squared_error, - metrics=[keras_core.metrics.Hinge(), "mse"], + metrics=[keras.metrics.Hinge(), "mse"], ) return sequential_model def _get_basic_sequential_model(compile=True): - sequential_model = keras_core.Sequential( + sequential_model = keras.Sequential( [ - keras_core.layers.Dense(1, name="dense_1"), - keras_core.layers.Dense(1, name="dense_2"), + keras.layers.Dense(1, name="dense_1"), + keras.layers.Dense(1, name="dense_2"), ], name="sequential", ) @@ -167,35 +167,35 @@ def _get_basic_sequential_model(compile=True): sequential_model.compile( optimizer="adam", loss=my_mean_squared_error, - metrics=[keras_core.metrics.Hinge(), "mse"], + metrics=[keras.metrics.Hinge(), "mse"], ) return sequential_model def _get_custom_functional_model(compile=True): - inputs = keras_core.Input(shape=(4,), batch_size=2) + inputs = keras.Input(shape=(4,), batch_size=2) x = MyDense(1, name="first_dense")(inputs) outputs = MyDense(1, name="second_dense")(x) - functional_model = keras_core.Model(inputs, outputs) + functional_model = keras.Model(inputs, outputs) if compile: functional_model.compile( optimizer="adam", loss=my_mean_squared_error, - metrics=[keras_core.metrics.Hinge(), "mse"], + metrics=[keras.metrics.Hinge(), "mse"], ) return functional_model def 
_get_basic_functional_model(compile=True): - inputs = keras_core.Input(shape=(4,), batch_size=2) - x = keras_core.layers.Dense(1, name="first_dense")(inputs) - outputs = keras_core.layers.Dense(1, name="second_dense")(x) - functional_model = keras_core.Model(inputs, outputs) + inputs = keras.Input(shape=(4,), batch_size=2) + x = keras.layers.Dense(1, name="first_dense")(inputs) + outputs = keras.layers.Dense(1, name="second_dense")(x) + functional_model = keras.Model(inputs, outputs) if compile: functional_model.compile( optimizer="adam", loss=my_mean_squared_error, - metrics=[keras_core.metrics.Hinge(), "mse"], + metrics=[keras.metrics.Hinge(), "mse"], ) return functional_model @@ -349,7 +349,7 @@ class SavingTest(testing.TestCase): CompileOverridingModel() if model_type == "subclassed" else CompileOverridingSequential( - [keras_core.layers.Embedding(4, 1), MyDense(1), MyDense(1)] + [keras.layers.Embedding(4, 1), MyDense(1), MyDense(1)] ) ) model.compile("sgd", "mse") @@ -464,21 +464,21 @@ class SavingTest(testing.TestCase): def test_partial_load(self): temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") - original_model = keras_core.Sequential( + original_model = keras.Sequential( [ - keras_core.Input(shape=(3,), batch_size=2), - keras_core.layers.Dense(4), - keras_core.layers.Dense(5), + keras.Input(shape=(3,), batch_size=2), + keras.layers.Dense(4), + keras.layers.Dense(5), ] ) original_model.save(temp_filepath) # Test with a model that has a differently shaped layer - new_model = keras_core.Sequential( + new_model = keras.Sequential( [ - keras_core.Input(shape=(3,), batch_size=2), - keras_core.layers.Dense(4), - keras_core.layers.Dense(6), + keras.Input(shape=(3,), batch_size=2), + keras.layers.Dense(4), + keras.layers.Dense(6), ] ) new_layer_kernel_value = np.array(new_model.layers[1].kernel) @@ -497,12 +497,12 @@ class SavingTest(testing.TestCase): ) # Test with a model that has a new layer at the end - new_model = keras_core.Sequential( + 
new_model = keras.Sequential( [ - keras_core.Input(shape=(3,), batch_size=2), - keras_core.layers.Dense(4), - keras_core.layers.Dense(5), - keras_core.layers.Dense(5), + keras.Input(shape=(3,), batch_size=2), + keras.layers.Dense(4), + keras.layers.Dense(5), + keras.layers.Dense(5), ] ) new_layer_kernel_value = np.array(new_model.layers[2].kernel) @@ -525,7 +525,7 @@ class SavingTest(testing.TestCase): @pytest.mark.requires_trainable_backend class SavingAPITest(testing.TestCase): def test_saving_api_errors(self): - from keras_core.saving import saving_api + from keras.saving import saving_api model = _get_basic_functional_model() @@ -559,7 +559,7 @@ class SavingAPITest(testing.TestCase): ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) model.save(temp_filepath) - model = keras_core.saving.load_model(temp_filepath) + model = keras.saving.load_model(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_model_api_endpoint_h5(self): @@ -568,7 +568,7 @@ class SavingAPITest(testing.TestCase): ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) model.save(temp_filepath) - model = keras_core.saving.load_model(temp_filepath) + model = keras.saving.load_model(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_model_api_errors(self): @@ -590,10 +590,10 @@ class SavingAPITest(testing.TestCase): # def test_safe_mode(self): # temp_filepath = os.path.join(self.get_temp_dir(), "unsafe_model.keras") -# model = keras_core.Sequential( +# model = keras.Sequential( # [ -# keras_core.Input(shape=(3,)), -# keras_core.layers.Dense(2, activation=lambda x: x * 2), +# keras.Input(shape=(3,)), +# keras.layers.Dense(2, activation=lambda x: x * 2), # ] # ) # model.save(temp_filepath) @@ -604,10 +604,10 @@ class SavingAPITest(testing.TestCase): # def test_normalization_kpl(self): # # With adapt # temp_filepath = os.path.join(self.get_temp_dir(), "norm_model.keras") -# 
model = keras_core.Sequential( +# model = keras.Sequential( # [ -# keras_core.Input(shape=(3,)), -# keras_core.layers.Normalization(), +# keras.Input(shape=(3,)), +# keras.layers.Normalization(), # ] # ) # data = np.random.random((3, 3)) @@ -619,10 +619,10 @@ class SavingAPITest(testing.TestCase): # self.assertAllClose(ref_out, out, atol=1e-6) # # Without adapt -# model = keras_core.Sequential( +# model = keras.Sequential( # [ -# keras_core.Input(shape=(3,)), -# keras_core.layers.Normalization( +# keras.Input(shape=(3,)), +# keras.layers.Normalization( # mean=np.random.random((3,)), # variance=np.random.random((3,)), # ), @@ -636,17 +636,17 @@ class SavingAPITest(testing.TestCase): # # This custom class lacks custom object registration. -# class CustomRNN(keras_core.layers.Layer): +# class CustomRNN(keras.layers.Layer): # def __init__(self, units): # super(CustomRNN, self).__init__() # self.units = units -# self.projection_1 = keras_core.layers.Dense( +# self.projection_1 = keras.layers.Dense( # units=units, activation="tanh" # ) -# self.projection_2 = keras_core.layers.Dense( +# self.projection_2 = keras.layers.Dense( # units=units, activation="tanh" # ) -# self.classifier = keras_core.layers.Dense(1) +# self.classifier = keras.layers.Dense(1) # def call(self, inputs): # outputs = [] @@ -662,9 +662,9 @@ class SavingAPITest(testing.TestCase): # # This class is properly registered with a `get_config()` method. -# # However, since it does not subclass keras_core.layers.Layer, it lacks +# # However, since it does not subclass keras.layers.Layer, it lacks # # `from_config()` for deserialization. 
-# @keras_core.saving.register_keras_serializable() +# @keras.saving.register_keras_serializable() # class GrowthFactor: # def __init__(self, factor): # self.factor = factor @@ -676,8 +676,8 @@ class SavingAPITest(testing.TestCase): # return {"factor": self.factor} -# @keras_core.saving.register_keras_serializable(package="Complex") -# class FactorLayer(keras_core.layers.Layer): +# @keras.saving.register_keras_serializable(package="Complex") +# class FactorLayer(keras.layers.Layer): # def __init__(self, factor): # super().__init__() # self.factor = factor @@ -692,15 +692,15 @@ class SavingAPITest(testing.TestCase): # # This custom model does not explicitly deserialize the layers it includes # # in its `get_config`. Explicit deserialization in a `from_config` override # # or `__init__` is needed here, or an error will be thrown at loading time. -# @keras_core.saving.register_keras_serializable(package="Complex") -# class ComplexModel(keras_core.layers.Layer): +# @keras.saving.register_keras_serializable(package="Complex") +# class ComplexModel(keras.layers.Layer): # def __init__(self, first_layer, second_layer=None, **kwargs): # super().__init__(**kwargs) # self.first_layer = first_layer # if second_layer is not None: # self.second_layer = second_layer # else: -# self.second_layer = keras_core.layers.Dense(8) +# self.second_layer = keras.layers.Dense(8) # def get_config(self): # config = super().get_config() @@ -725,48 +725,48 @@ class SavingAPITest(testing.TestCase): # input_dim = 5 # batch_size = 16 -# inputs = keras_core.Input( +# inputs = keras.Input( # batch_shape=(batch_size, timesteps, input_dim) # ) -# x = keras_core.layers.Conv1D(32, 3)(inputs) +# x = keras.layers.Conv1D(32, 3)(inputs) # outputs = CustomRNN(32)(x) -# model = keras_core.Model(inputs, outputs) +# model = keras.Model(inputs, outputs) # with self.assertRaisesRegex( # TypeError, "is a custom class, please register it" # ): # model.save(temp_filepath) -# _ = 
keras_core.models.load_model(temp_filepath) +# _ = keras.models.load_model(temp_filepath) # def test_custom_object_without_from_config(self): # temp_filepath = os.path.join( # self.get_temp_dir(), "custom_fn_model.keras" # ) -# inputs = keras_core.Input(shape=(4, 4)) -# outputs = keras_core.layers.Dense( +# inputs = keras.Input(shape=(4, 4)) +# outputs = keras.layers.Dense( # 1, activation=GrowthFactor(0.5) # )(inputs) -# model = keras_core.Model(inputs, outputs) +# model = keras.Model(inputs, outputs) # model.save(temp_filepath) # with self.assertRaisesRegex( # TypeError, "Unable to reconstruct an instance" # ): -# _ = keras_core.models.load_model(temp_filepath) +# _ = keras.models.load_model(temp_filepath) # def test_complex_model_without_explicit_deserialization(self): # temp_filepath = os.path.join( # self.get_temp_dir(), "complex_model.keras" # ) -# inputs = keras_core.Input((32,)) +# inputs = keras.Input((32,)) # outputs = ComplexModel(first_layer=FactorLayer(0.5))(inputs) -# model = keras_core.Model(inputs, outputs) +# model = keras.Model(inputs, outputs) # model.save(temp_filepath) # with self.assertRaisesRegex(TypeError, "are explicitly deserialized"): -# _ = keras_core.models.load_model(temp_filepath) +# _ = keras.models.load_model(temp_filepath) diff --git a/keras_core/saving/serialization_lib.py b/keras/saving/serialization_lib.py similarity index 94% rename from keras_core/saving/serialization_lib.py rename to keras/saving/serialization_lib.py index 6c1435af9..622bda77b 100644 --- a/keras_core/saving/serialization_lib.py +++ b/keras/saving/serialization_lib.py @@ -7,13 +7,13 @@ import warnings import numpy as np -from keras_core import api_export -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state -from keras_core.saving import object_registration -from keras_core.utils import python_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import 
api_export +from keras import backend +from keras.api_export import keras_export +from keras.backend.common import global_state +from keras.saving import object_registration +from keras.utils import python_utils +from keras.utils.module_utils import tensorflow as tf PLAIN_TYPES = (str, int, float, bool) @@ -53,7 +53,7 @@ class SafeModeScope: ) -@keras_core_export("keras_core.config.enable_unsafe_deserialization") +@keras_export("keras.config.enable_unsafe_deserialization") def enable_unsafe_deserialization(): """Disables safe mode globally, allowing deserialization of lambdas.""" global_state.set_global_attribute("safe_mode_saving", False) @@ -114,10 +114,10 @@ def record_object_after_deserialization(obj, obj_id): id_to_obj_map[obj_id] = obj -@keras_core_export( +@keras_export( [ - "keras_core.saving.serialize_keras_object", - "keras_core.utils.serialize_keras_object", + "keras.saving.serialize_keras_object", + "keras.utils.serialize_keras_object", ] ) def serialize_keras_object(obj): @@ -289,10 +289,10 @@ def serialize_with_public_class(cls, inner_config=None): Called to check and retrieve the config of any class that has a public Keras API or has been registered as serializable via - `keras_core.saving.register_keras_serializable()`. + `keras.saving.register_keras_serializable()`. """ - # This gets the `keras_core.*` exported name, such as - # "keras_core.optimizers.Adam". + # This gets the `keras.*` exported name, such as + # "keras.optimizers.Adam". keras_api_name = api_export.get_name_from_symbol(cls) # Case of custom or unknown class object @@ -324,7 +324,7 @@ def serialize_with_public_fn(fn, config, fn_module_name=None): Called to check and retrieve the config of any function that has a public Keras API or has been registered as serializable via - `keras_core.saving.register_keras_serializable()`. If function's module name + `keras.saving.register_keras_serializable()`. If function's module name is already known, returns corresponding config. 
""" if fn_module_name: @@ -383,10 +383,10 @@ def serialize_dict(obj): return {key: serialize_keras_object(value) for key, value in obj.items()} -@keras_core_export( +@keras_export( [ - "keras_core.saving.deserialize_keras_object", - "keras_core.utils.deserialize_keras_object", + "keras.saving.deserialize_keras_object", + "keras.utils.deserialize_keras_object", ] ) def deserialize_keras_object( @@ -405,9 +405,9 @@ def deserialize_keras_object( - `config`: Dict. Library-defined or user-defined key-value pairs that store the configuration of the object, as obtained by `object.get_config()`. - `module`: String. The path of the python module. Built-in Keras classes - expect to have prefix `keras_core`. + expect to have prefix `keras`. - `registered_name`: String. The key the class is registered under via - `keras_core.saving.register_keras_serializable(package, name)` API. The + `keras.saving.register_keras_serializable(package, name)` API. The key has the format of '{package}>{name}', where `package` and `name` are the arguments passed to `register_keras_serializable()`. If `name` is not provided, it uses the class name. If `registered_name` successfully @@ -430,7 +430,7 @@ def deserialize_keras_object( "learning_rate": 0.0010000000474974513, "name": "Adam" }, - "module": "keras_core.optimizers", + "module": "keras.optimizers", "registered_name": None } # Returns an `Adam` instance identical to the original one. @@ -446,7 +446,7 @@ def deserialize_keras_object( "config": { ... }, - "module": "keras_core.trainers.compile_utils", + "module": "keras.trainers.compile_utils", "registered_name": "MetricsList" } @@ -458,8 +458,8 @@ def deserialize_keras_object( loss: ```python - @keras_core.saving.register_keras_serializable(package='my_package') - class ModifiedMeanSquaredError(keras_core.losses.MeanSquaredError): + @keras.saving.register_keras_serializable(package='my_package') + class ModifiedMeanSquaredError(keras.losses.MeanSquaredError): ... 
dict_structure = { @@ -554,7 +554,7 @@ def deserialize_keras_object( f"`{config['class_name']}`. If " f"`{config['class_name']}` is a custom class, please " "register it using the " - "`@keras_core.saving.register_keras_serializable()` " + "`@keras.saving.register_keras_serializable()` " "decorator." ) config = config["class_name"] @@ -637,7 +637,7 @@ def deserialize_keras_object( "and thus it is disallowed by default. If you trust the " "source of the saved model, you can pass `safe_mode=False` to " "the loading function in order to allow `lambda` loading, " - "or call `keras_core.config.enable_unsafe_deserialization()`." + "or call `keras.config.enable_unsafe_deserialization()`." ) return python_utils.func_load(inner_config["value"]) if tf is not None and config["class_name"] == "__typespec__": @@ -741,7 +741,7 @@ def _retrieve_class_or_fn( # we cannot always use direct import, because the exported # module name might not match the package structure # (e.g. experimental symbols). - if module == "keras_core" or module.startswith("keras_core."): + if module == "keras" or module.startswith("keras."): api_name = module + "." + name obj = api_export.get_symbol_from_name(api_name) @@ -755,7 +755,7 @@ def _retrieve_class_or_fn( if obj_type == "function" and module == "builtins": for mod in BUILTIN_MODULES: obj = api_export.get_symbol_from_name( - "keras_core." + mod + "." + name + "keras." + mod + "." + name ) if obj is not None: return obj @@ -791,6 +791,6 @@ def _retrieve_class_or_fn( raise TypeError( f"Could not locate {obj_type} '{name}'. " "Make sure custom classes are decorated with " - "`@keras_core.saving.register_keras_serializable()`. " + "`@keras.saving.register_keras_serializable()`. 
" f"Full object config: {full_config}" ) diff --git a/keras_core/saving/serialization_lib_test.py b/keras/saving/serialization_lib_test.py similarity index 87% rename from keras_core/saving/serialization_lib_test.py rename to keras/saving/serialization_lib_test.py index 26daa33c2..5021df8e6 100644 --- a/keras_core/saving/serialization_lib_test.py +++ b/keras/saving/serialization_lib_test.py @@ -5,17 +5,17 @@ import json import numpy as np import pytest -import keras_core -from keras_core import ops -from keras_core import testing -from keras_core.saving import serialization_lib +import keras +from keras import ops +from keras import testing +from keras.saving import serialization_lib def custom_fn(x): return x**2 -class CustomLayer(keras_core.layers.Layer): +class CustomLayer(keras.layers.Layer): def __init__(self, factor): super().__init__() self.factor = factor @@ -27,13 +27,13 @@ class CustomLayer(keras_core.layers.Layer): return {"factor": self.factor} -class NestedCustomLayer(keras_core.layers.Layer): +class NestedCustomLayer(keras.layers.Layer): def __init__(self, factor, dense=None, activation=None): super().__init__() self.factor = factor if dense is None: - self.dense = keras_core.layers.Dense(1, activation=custom_fn) + self.dense = keras.layers.Dense(1, activation=custom_fn) else: self.dense = serialization_lib.deserialize_keras_object(dense) self.activation = serialization_lib.deserialize_keras_object(activation) @@ -49,7 +49,7 @@ class NestedCustomLayer(keras_core.layers.Layer): } -class WrapperLayer(keras_core.layers.Wrapper): +class WrapperLayer(keras.layers.Wrapper): def call(self, x): return self.layer(x) @@ -82,7 +82,7 @@ class SerializationLibTest(testing.TestCase): self.assertEqual(serialized, reserialized) def test_builtin_layers(self): - layer = keras_core.layers.Dense( + layer = keras.layers.Dense( 3, name="foo", trainable=False, @@ -112,7 +112,7 @@ class SerializationLibTest(testing.TestCase): self.assertEqual(serialized, reserialized) # Test 
inside layer - dense = keras_core.layers.Dense(1, activation=custom_fn) + dense = keras.layers.Dense(1, activation=custom_fn) dense.build((None, 2)) _, new_dense, _ = self.roundtrip( dense, custom_objects={"custom_fn": custom_fn} @@ -159,7 +159,7 @@ class SerializationLibTest(testing.TestCase): # TODO # def test_lambda_layer(self): - # lmbda = keras_core.layers.Lambda(lambda x: x**2) + # lmbda = keras.layers.Lambda(lambda x: x**2) # with self.assertRaisesRegex(ValueError, "arbitrary code execution"): # self.roundtrip(lmbda, safe_mode=True) @@ -170,7 +170,7 @@ class SerializationLibTest(testing.TestCase): # self.assertAllClose(y1, y2, atol=1e-5) # def test_safe_mode_scope(self): - # lmbda = keras_core.layers.Lambda(lambda x: x**2) + # lmbda = keras.layers.Lambda(lambda x: x**2) # with serialization_lib.SafeModeScope(safe_mode=True): # with self.assertRaisesRegex( # ValueError, "arbitrary code execution" @@ -185,12 +185,12 @@ class SerializationLibTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_dict_inputs_outputs(self): - input_foo = keras_core.Input((2,), name="foo") - input_bar = keras_core.Input((2,), name="bar") - dense = keras_core.layers.Dense(1) + input_foo = keras.Input((2,), name="foo") + input_bar = keras.Input((2,), name="bar") + dense = keras.layers.Dense(1) output_foo = dense(input_foo) output_bar = dense(input_bar) - model = keras_core.Model( + model = keras.Model( {"foo": input_foo, "bar": input_bar}, {"foo": output_foo, "bar": output_bar}, ) @@ -207,13 +207,13 @@ class SerializationLibTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_shared_inner_layer(self): with serialization_lib.ObjectSharingScope(): - input_1 = keras_core.Input((2,)) - input_2 = keras_core.Input((2,)) - shared_layer = keras_core.layers.Dense(1) + input_1 = keras.Input((2,)) + input_2 = keras.Input((2,)) + shared_layer = keras.layers.Dense(1) output_1 = shared_layer(input_1) wrapper_layer = WrapperLayer(shared_layer) output_2 = 
wrapper_layer(input_2) - model = keras_core.Model([input_1, input_2], [output_1, output_2]) + model = keras.Model([input_1, input_2], [output_1, output_2]) _, new_model, _ = self.roundtrip( model, custom_objects={"WrapperLayer": WrapperLayer} ) @@ -223,11 +223,11 @@ class SerializationLibTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_functional_subclass(self): - class PlainFunctionalSubclass(keras_core.Model): + class PlainFunctionalSubclass(keras.Model): pass - inputs = keras_core.Input((2,), batch_size=3) - outputs = keras_core.layers.Dense(1)(inputs) + inputs = keras.Input((2,), batch_size=3) + outputs = keras.layers.Dense(1)(inputs) model = PlainFunctionalSubclass(inputs, outputs) x = ops.random.normal((2, 2)) y1 = model(x) @@ -240,10 +240,10 @@ class SerializationLibTest(testing.TestCase): self.assertAllClose(y1, y2, atol=1e-5) self.assertIsInstance(new_model, PlainFunctionalSubclass) - class FunctionalSubclassWCustomInit(keras_core.Model): + class FunctionalSubclassWCustomInit(keras.Model): def __init__(self, num_units=2): - inputs = keras_core.Input((2,), batch_size=3) - outputs = keras_core.layers.Dense(num_units)(inputs) + inputs = keras.Input((2,), batch_size=3) + outputs = keras.layers.Dense(num_units)(inputs) super().__init__(inputs, outputs) self.num_units = num_units @@ -265,7 +265,7 @@ class SerializationLibTest(testing.TestCase): self.assertIsInstance(new_model, FunctionalSubclassWCustomInit) def test_shared_object(self): - class MyLayer(keras_core.layers.Layer): + class MyLayer(keras.layers.Layer): def __init__(self, activation, **kwargs): super().__init__(**kwargs) if isinstance(activation, dict): @@ -315,8 +315,8 @@ class SerializationLibTest(testing.TestCase): self.assertIs(new_layers[0].activation, new_layers[1].activation) -@keras_core.saving.register_keras_serializable() -class MyDense(keras_core.layers.Layer): +@keras.saving.register_keras_serializable() +class MyDense(keras.layers.Layer): def __init__( self, units, 
@@ -352,8 +352,8 @@ class MyDense(keras_core.layers.Layer): return ops.matmul(inputs, self._kernel) -@keras_core.saving.register_keras_serializable() -class MyWrapper(keras_core.layers.Layer): +@keras.saving.register_keras_serializable() +class MyWrapper(keras.layers.Layer): def __init__(self, wrapped, **kwargs): super().__init__(**kwargs) self._wrapped = wrapped @@ -363,7 +363,7 @@ class MyWrapper(keras_core.layers.Layer): @classmethod def from_config(cls, config): - config["wrapped"] = keras_core.saving.deserialize_keras_object( + config["wrapped"] = keras.saving.deserialize_keras_object( config["wrapped"] ) return cls(**config) diff --git a/keras/testing/__init__.py b/keras/testing/__init__.py new file mode 100644 index 000000000..895102c26 --- /dev/null +++ b/keras/testing/__init__.py @@ -0,0 +1 @@ +from keras.testing.test_case import TestCase diff --git a/keras_core/testing/test_case.py b/keras/testing/test_case.py similarity index 96% rename from keras_core/testing/test_case.py rename to keras/testing/test_case.py index a39a2b371..cfd876b95 100644 --- a/keras_core/testing/test_case.py +++ b/keras/testing/test_case.py @@ -6,14 +6,14 @@ import unittest import numpy as np import tree -from keras_core import backend -from keras_core import ops -from keras_core import utils -from keras_core.backend.common import is_float_dtype -from keras_core.backend.common import standardize_dtype -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.models import Model -from keras_core.utils import traceback_utils +from keras import backend +from keras import ops +from keras import utils +from keras.backend.common import is_float_dtype +from keras.backend.common import standardize_dtype +from keras.backend.common.keras_tensor import KerasTensor +from keras.models import Model +from keras.utils import traceback_utils class TestCase(unittest.TestCase): @@ -69,9 +69,9 @@ class TestCase(unittest.TestCase): self.assertEqual(len(iterable), expected_len, 
msg=msg) def run_class_serialization_test(self, instance, custom_objects=None): - from keras_core.saving import custom_object_scope - from keras_core.saving import deserialize_keras_object - from keras_core.saving import serialize_keras_object + from keras.saving import custom_object_scope + from keras.saving import deserialize_keras_object + from keras.saving import serialize_keras_object # get_config roundtrip cls = instance.__class__ @@ -421,7 +421,7 @@ def create_keras_tensors(input_shape, dtype, sparse): def create_eager_tensors(input_shape, dtype, sparse): - from keras_core.backend import random + from keras.backend import random if dtype not in [ "float16", diff --git a/keras_core/testing/test_utils.py b/keras/testing/test_utils.py similarity index 100% rename from keras_core/testing/test_utils.py rename to keras/testing/test_utils.py diff --git a/keras_core/trainers/__init__.py b/keras/trainers/__init__.py similarity index 100% rename from keras_core/trainers/__init__.py rename to keras/trainers/__init__.py diff --git a/keras_core/trainers/compile_utils.py b/keras/trainers/compile_utils.py similarity index 99% rename from keras_core/trainers/compile_utils.py rename to keras/trainers/compile_utils.py index 2bacb813b..9dbf20eb6 100644 --- a/keras_core/trainers/compile_utils.py +++ b/keras/trainers/compile_utils.py @@ -1,10 +1,10 @@ import tree -from keras_core import backend -from keras_core import losses as losses_module -from keras_core import metrics as metrics_module -from keras_core import ops -from keras_core.utils.naming import get_object_name +from keras import backend +from keras import losses as losses_module +from keras import metrics as metrics_module +from keras import ops +from keras.utils.naming import get_object_name class MetricsList(metrics_module.Metric): diff --git a/keras_core/trainers/compile_utils_test.py b/keras/trainers/compile_utils_test.py similarity index 97% rename from keras_core/trainers/compile_utils_test.py rename to 
keras/trainers/compile_utils_test.py index 4a365f4ab..58e3ec674 100644 --- a/keras_core/trainers/compile_utils_test.py +++ b/keras/trainers/compile_utils_test.py @@ -1,12 +1,12 @@ import numpy as np from absl.testing import parameterized -from keras_core import backend -from keras_core import metrics as losses_module -from keras_core import metrics as metrics_module -from keras_core import testing -from keras_core.trainers.compile_utils import CompileLoss -from keras_core.trainers.compile_utils import CompileMetrics +from keras import backend +from keras import metrics as losses_module +from keras import metrics as metrics_module +from keras import testing +from keras.trainers.compile_utils import CompileLoss +from keras.trainers.compile_utils import CompileMetrics class TestCompileMetrics(testing.TestCase): diff --git a/keras_core/trainers/data_adapters/__init__.py b/keras/trainers/data_adapters/__init__.py similarity index 100% rename from keras_core/trainers/data_adapters/__init__.py rename to keras/trainers/data_adapters/__init__.py diff --git a/keras_core/trainers/data_adapters/array_data_adapter.py b/keras/trainers/data_adapters/array_data_adapter.py similarity index 97% rename from keras_core/trainers/data_adapters/array_data_adapter.py rename to keras/trainers/data_adapters/array_data_adapter.py index 216a8fc01..aa5b7910f 100644 --- a/keras_core/trainers/data_adapters/array_data_adapter.py +++ b/keras/trainers/data_adapters/array_data_adapter.py @@ -3,10 +3,10 @@ import math import numpy as np import tree -from keras_core import backend -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.data_adapters.data_adapter import DataAdapter -from keras_core.utils.nest import lists_to_tuples +from keras import backend +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.utils.nest import lists_to_tuples try: import pandas @@ -107,7 +107,7 @@ 
class ArrayDataAdapter(DataAdapter): yield tree.map_structure(lambda x: x[start:stop], inputs) def get_tf_dataset(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf inputs = self._inputs shuffle = self._shuffle @@ -301,7 +301,7 @@ def convert_to_arrays(arrays): elif isinstance(x, pandas.DataFrame): x = x.to_numpy() if is_tf_ragged_tensor(x): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf # Convert floats to floatx. if ( diff --git a/keras_core/trainers/data_adapters/array_data_adapter_test.py b/keras/trainers/data_adapters/array_data_adapter_test.py similarity index 98% rename from keras_core/trainers/data_adapters/array_data_adapter_test.py rename to keras/trainers/data_adapters/array_data_adapter_test.py index c5179d99e..4a0c94d8b 100644 --- a/keras_core/trainers/data_adapters/array_data_adapter_test.py +++ b/keras/trainers/data_adapters/array_data_adapter_test.py @@ -4,9 +4,9 @@ import pytest import tensorflow as tf from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.trainers.data_adapters import array_data_adapter +from keras import backend +from keras import testing +from keras.trainers.data_adapters import array_data_adapter class TestArrayDataAdapter(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/trainers/data_adapters/data_adapter.py b/keras/trainers/data_adapters/data_adapter.py similarity index 100% rename from keras_core/trainers/data_adapters/data_adapter.py rename to keras/trainers/data_adapters/data_adapter.py diff --git a/keras_core/trainers/data_adapters/data_adapter_utils.py b/keras/trainers/data_adapters/data_adapter_utils.py similarity index 96% rename from keras_core/trainers/data_adapters/data_adapter_utils.py rename to keras/trainers/data_adapters/data_adapter_utils.py index 2a37bde52..0c2319a08 100644 --- 
a/keras_core/trainers/data_adapters/data_adapter_utils.py +++ b/keras/trainers/data_adapters/data_adapter_utils.py @@ -3,8 +3,8 @@ import math import numpy as np import tree -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export try: import pandas @@ -17,14 +17,14 @@ except ImportError: # backend framework we are not currently using just to do type-checking. ARRAY_TYPES = (np.ndarray,) if backend.backend() == "tensorflow": - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf ARRAY_TYPES = ARRAY_TYPES + (np.ndarray, tf.RaggedTensor) if pandas: ARRAY_TYPES = ARRAY_TYPES + (pandas.Series, pandas.DataFrame) -@keras_core_export("keras_core.utils.unpack_x_y_sample_weight") +@keras_export("keras.utils.unpack_x_y_sample_weight") def unpack_x_y_sample_weight(data): """Unpacks user-provided data tuple. @@ -67,7 +67,7 @@ def unpack_x_y_sample_weight(data): raise ValueError(error_msg) -@keras_core_export("keras_core.utils.pack_x_y_sample_weight") +@keras_export("keras.utils.pack_x_y_sample_weight") def pack_x_y_sample_weight(x, y=None, sample_weight=None): """Packs user-provided data into a tuple. 
diff --git a/keras_core/trainers/data_adapters/generator_data_adapter.py b/keras/trainers/data_adapters/generator_data_adapter.py similarity index 91% rename from keras_core/trainers/data_adapters/generator_data_adapter.py rename to keras/trainers/data_adapters/generator_data_adapter.py index 0cff39204..fe3c8766e 100644 --- a/keras_core/trainers/data_adapters/generator_data_adapter.py +++ b/keras/trainers/data_adapters/generator_data_adapter.py @@ -2,7 +2,7 @@ import itertools import tree -from keras_core.trainers.data_adapters.data_adapter import DataAdapter +from keras.trainers.data_adapters.data_adapter import DataAdapter class GeneratorDataAdapter(DataAdapter): @@ -22,7 +22,7 @@ class GeneratorDataAdapter(DataAdapter): ) def _set_tf_output_signature(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf data, generator = peek_and_restore(self.generator) self.generator = generator @@ -47,7 +47,7 @@ class GeneratorDataAdapter(DataAdapter): yield batch def get_tf_dataset(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf if self._output_signature is None: self._set_tf_output_signature() diff --git a/keras_core/trainers/data_adapters/generator_data_adapter_test.py b/keras/trainers/data_adapters/generator_data_adapter_test.py similarity index 96% rename from keras_core/trainers/data_adapters/generator_data_adapter_test.py rename to keras/trainers/data_adapters/generator_data_adapter_test.py index 459607c30..8537c74c9 100644 --- a/keras_core/trainers/data_adapters/generator_data_adapter_test.py +++ b/keras/trainers/data_adapters/generator_data_adapter_test.py @@ -4,8 +4,8 @@ import numpy as np import tensorflow as tf from absl.testing import parameterized -from keras_core import testing -from keras_core.trainers.data_adapters import generator_data_adapter +from keras import testing +from keras.trainers.data_adapters import 
generator_data_adapter def example_generator(x, y, sample_weight=None, batch_size=32): diff --git a/keras_core/trainers/data_adapters/py_dataset_adapter.py b/keras/trainers/data_adapters/py_dataset_adapter.py similarity index 97% rename from keras_core/trainers/data_adapters/py_dataset_adapter.py rename to keras/trainers/data_adapters/py_dataset_adapter.py index c15a947ab..143582c99 100644 --- a/keras_core/trainers/data_adapters/py_dataset_adapter.py +++ b/keras/trainers/data_adapters/py_dataset_adapter.py @@ -10,12 +10,12 @@ from contextlib import closing import numpy as np import tree -from keras_core.api_export import keras_core_export -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.data_adapters.data_adapter import DataAdapter +from keras.api_export import keras_export +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.data_adapters.data_adapter import DataAdapter -@keras_core_export(["keras_core.utils.PyDataset", "keras_core.utils.Sequence"]) +@keras_export(["keras.utils.PyDataset", "keras.utils.Sequence"]) class PyDataset: """Base class for defining a parallel dataset using Python code. @@ -64,7 +64,7 @@ class PyDataset: # Here, `x_set` is list of path to the images # and `y_set` are the associated classes. 
- class CIFAR10PyDataset(keras_core.utils.PyDataset): + class CIFAR10PyDataset(keras.utils.PyDataset): def __init__(self, x_set, y_set, batch_size, **kwargs): super().__init__(**kwargs) @@ -173,7 +173,7 @@ class PyDataset: class PyDatasetAdapter(DataAdapter): - """Adapter for `keras_core.utils.PyDataset` instances.""" + """Adapter for `keras.utils.PyDataset` instances.""" def __init__( self, @@ -188,7 +188,7 @@ class PyDatasetAdapter(DataAdapter): self._output_signature = None def _set_tf_output_signature(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf def get_tensor_spec(x): shape = x.shape @@ -273,7 +273,7 @@ class PyDatasetAdapter(DataAdapter): self.enqueuer.stop() def get_tf_dataset(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf if self._output_signature is None: self._set_tf_output_signature() @@ -483,7 +483,7 @@ class OrderedEnqueuer(PyDatasetEnqueuer): """Builds a Enqueuer from a PyDataset. Args: - py_dataset: A `keras_core.utils.PyDataset` object. + py_dataset: A `keras.utils.PyDataset` object. 
use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch """ diff --git a/keras_core/trainers/data_adapters/py_dataset_adapter_test.py b/keras/trainers/data_adapters/py_dataset_adapter_test.py similarity index 96% rename from keras_core/trainers/data_adapters/py_dataset_adapter_test.py rename to keras/trainers/data_adapters/py_dataset_adapter_test.py index 71c1a7612..369188be3 100644 --- a/keras_core/trainers/data_adapters/py_dataset_adapter_test.py +++ b/keras/trainers/data_adapters/py_dataset_adapter_test.py @@ -5,9 +5,9 @@ import numpy as np import tensorflow as tf from absl.testing import parameterized -from keras_core import testing -from keras_core.trainers.data_adapters import py_dataset_adapter -from keras_core.utils.rng_utils import set_random_seed +from keras import testing +from keras.trainers.data_adapters import py_dataset_adapter +from keras.utils.rng_utils import set_random_seed class ExamplePyDataset(py_dataset_adapter.PyDataset): diff --git a/keras_core/trainers/data_adapters/tf_dataset_adapter.py b/keras/trainers/data_adapters/tf_dataset_adapter.py similarity index 91% rename from keras_core/trainers/data_adapters/tf_dataset_adapter.py rename to keras/trainers/data_adapters/tf_dataset_adapter.py index 0169614fe..24370624c 100644 --- a/keras_core/trainers/data_adapters/tf_dataset_adapter.py +++ b/keras/trainers/data_adapters/tf_dataset_adapter.py @@ -1,14 +1,14 @@ import tree -from keras_core.trainers.data_adapters import data_adapter_utils -from keras_core.trainers.data_adapters.data_adapter import DataAdapter +from keras.trainers.data_adapters import data_adapter_utils +from keras.trainers.data_adapters.data_adapter import DataAdapter class TFDatasetAdapter(DataAdapter): """Adapter that handles `tf.data.Dataset`.""" def __init__(self, dataset, class_weight=None): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import 
tensorflow as tf if not isinstance(dataset, tf.data.Dataset): raise ValueError( @@ -64,7 +64,7 @@ def make_class_weight_map_fn(class_weight): A function that can be used with `tf.data.Dataset.map` to apply class weighting. """ - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf class_weight_tensor = tf.convert_to_tensor( [ diff --git a/keras_core/trainers/data_adapters/tf_dataset_adapter_test.py b/keras/trainers/data_adapters/tf_dataset_adapter_test.py similarity index 98% rename from keras_core/trainers/data_adapters/tf_dataset_adapter_test.py rename to keras/trainers/data_adapters/tf_dataset_adapter_test.py index f9e92ed19..63b6900ff 100644 --- a/keras_core/trainers/data_adapters/tf_dataset_adapter_test.py +++ b/keras/trainers/data_adapters/tf_dataset_adapter_test.py @@ -1,8 +1,8 @@ import numpy as np import tensorflow as tf -from keras_core import testing -from keras_core.trainers.data_adapters import tf_dataset_adapter +from keras import testing +from keras.trainers.data_adapters import tf_dataset_adapter class TestTFDatasetAdapter(testing.TestCase): diff --git a/keras_core/trainers/data_adapters/torch_data_adapter.py b/keras/trainers/data_adapters/torch_data_adapter.py similarity index 92% rename from keras_core/trainers/data_adapters/torch_data_adapter.py rename to keras/trainers/data_adapters/torch_data_adapter.py index 85fa6835b..c5f2ea13a 100644 --- a/keras_core/trainers/data_adapters/torch_data_adapter.py +++ b/keras/trainers/data_adapters/torch_data_adapter.py @@ -1,6 +1,6 @@ import tree -from keras_core.trainers.data_adapters.data_adapter import DataAdapter +from keras.trainers.data_adapters.data_adapter import DataAdapter class TorchDataLoaderAdapter(DataAdapter): @@ -28,7 +28,7 @@ class TorchDataLoaderAdapter(DataAdapter): return self._dataloader def get_tf_dataset(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf 
output_signature = self.peek_and_get_tensor_spec() return tf.data.Dataset.from_generator( @@ -37,7 +37,7 @@ class TorchDataLoaderAdapter(DataAdapter): ) def peek_and_get_tensor_spec(self): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf batch_data = next(iter(self._dataloader)) diff --git a/keras_core/trainers/data_adapters/torch_data_adapter_test.py b/keras/trainers/data_adapters/torch_data_adapter_test.py similarity index 95% rename from keras_core/trainers/data_adapters/torch_data_adapter_test.py rename to keras/trainers/data_adapters/torch_data_adapter_test.py index 89de7c86a..57a689503 100644 --- a/keras_core/trainers/data_adapters/torch_data_adapter_test.py +++ b/keras/trainers/data_adapters/torch_data_adapter_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest import tensorflow as tf -from keras_core import backend -from keras_core import testing -from keras_core.trainers.data_adapters.torch_data_adapter import ( +from keras import backend +from keras import testing +from keras.trainers.data_adapters.torch_data_adapter import ( TorchDataLoaderAdapter, ) diff --git a/keras_core/trainers/epoch_iterator.py b/keras/trainers/epoch_iterator.py similarity index 95% rename from keras_core/trainers/epoch_iterator.py rename to keras/trainers/epoch_iterator.py index 2f64ad181..7ce0e3aa8 100644 --- a/keras_core/trainers/epoch_iterator.py +++ b/keras/trainers/epoch_iterator.py @@ -41,12 +41,12 @@ or until there is no data import types import warnings -from keras_core.trainers.data_adapters import array_data_adapter -from keras_core.trainers.data_adapters import generator_data_adapter -from keras_core.trainers.data_adapters import py_dataset_adapter -from keras_core.trainers.data_adapters import tf_dataset_adapter -from keras_core.trainers.data_adapters import torch_data_adapter -from keras_core.utils.module_utils import tensorflow as tf +from keras.trainers.data_adapters import array_data_adapter 
+from keras.trainers.data_adapters import generator_data_adapter +from keras.trainers.data_adapters import py_dataset_adapter +from keras.trainers.data_adapters import tf_dataset_adapter +from keras.trainers.data_adapters import torch_data_adapter +from keras.utils.module_utils import tensorflow as tf class EpochIterator: diff --git a/keras_core/trainers/epoch_iterator_test.py b/keras/trainers/epoch_iterator_test.py similarity index 98% rename from keras_core/trainers/epoch_iterator_test.py rename to keras/trainers/epoch_iterator_test.py index ef1ed9267..09ef885cd 100644 --- a/keras_core/trainers/epoch_iterator_test.py +++ b/keras/trainers/epoch_iterator_test.py @@ -2,9 +2,9 @@ import numpy as np import pytest import tensorflow as tf -from keras_core import backend -from keras_core import testing -from keras_core.trainers import epoch_iterator +from keras import backend +from keras import testing +from keras.trainers import epoch_iterator class TestEpochIterator(testing.TestCase): diff --git a/keras_core/trainers/trainer.py b/keras/trainers/trainer.py similarity index 93% rename from keras_core/trainers/trainer.py rename to keras/trainers/trainer.py index 969ee9401..8606b4cbf 100644 --- a/keras_core/trainers/trainer.py +++ b/keras/trainers/trainer.py @@ -1,16 +1,16 @@ import platform import warnings -from keras_core import backend -from keras_core import metrics as metrics_module -from keras_core import ops -from keras_core import optimizers -from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras_core.saving import serialization_lib -from keras_core.trainers.compile_utils import CompileLoss -from keras_core.trainers.compile_utils import CompileMetrics -from keras_core.utils import traceback_utils -from keras_core.utils import tracking +from keras import backend +from keras import metrics as metrics_module +from keras import ops +from keras import optimizers +from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer +from 
keras.saving import serialization_lib +from keras.trainers.compile_utils import CompileLoss +from keras.trainers.compile_utils import CompileMetrics +from keras.utils import traceback_utils +from keras.utils import tracking class Trainer: @@ -42,20 +42,20 @@ class Trainer: ```python model.compile( - optimizer=keras_core.optimizers.Adam(learning_rate=1e-3), - loss=keras_core.losses.BinaryCrossentropy(), + optimizer=keras.optimizers.Adam(learning_rate=1e-3), + loss=keras.losses.BinaryCrossentropy(), metrics=[ - keras_core.metrics.BinaryAccuracy(), - keras_core.metrics.FalseNegatives(), + keras.metrics.BinaryAccuracy(), + keras.metrics.FalseNegatives(), ], ) ``` Args: optimizer: String (name of optimizer) or optimizer instance. See - `keras_core.optimizers`. + `keras.optimizers`. loss: Loss function. May be a string (name of loss function), or - a `keras_core.losses.Loss` instance. See `keras_core.losses`. A + a `keras.losses.Loss` instance. See `keras.losses`. A loss function is any callable with the signature `loss = fn(y_true, y_pred)`, where `y_true` are the ground truth values, and `y_pred` are the model's predictions. @@ -75,8 +75,8 @@ class Trainer: coefficients. metrics: List of metrics to be evaluated by the model during training and testing. Each of this can be a string (name of a - built-in function), function or a `keras_core.metrics.Metric` - instance. See `keras_core.metrics`. Typically you will use + built-in function), function or a `keras.metrics.Metric` + instance. See `keras.metrics`. Typically you will use `metrics=['accuracy']`. A function is any callable with the signature `result = fn(y_true, _pred)`. To specify different metrics for different outputs of a multi-output model, you could @@ -87,9 +87,9 @@ class Trainer: `metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. 
When you pass the strings 'accuracy' or 'acc', we convert this to one of - `keras_core.metrics.BinaryAccuracy`, - `keras_core.metrics.CategoricalAccuracy`, - `keras_core.metrics.SparseCategoricalAccuracy` based on the + `keras.metrics.BinaryAccuracy`, + `keras.metrics.CategoricalAccuracy`, + `keras.metrics.SparseCategoricalAccuracy` based on the shapes of the targets and of the model output. A similar conversion is done for the strings `"crossentropy"` and `"ce"` as well. @@ -345,7 +345,7 @@ class Trainer: Returns: A `dict` containing values that will be passed to - `keras_core.callbacks.CallbackList.on_train_batch_end()`. Typically, + `keras.callbacks.CallbackList.on_train_batch_end()`. Typically, the values of the metrics listed in `self.metrics` are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`. """ @@ -405,18 +405,18 @@ class Trainer: - A `tf.data.Dataset`. Should return a tuple of either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - - A `keras_core.utils.PyDataset` returning `(inputs, + - A `keras.utils.PyDataset` returning `(inputs, targets)` or `(inputs, targets, sample_weights)`. y: Target data. Like the input data `x`, it could be either NumPy array(s) or backend-native tensor(s). If `x` is a dataset, generator, - or `keras_core.utils.PyDataset` instance, `y` should + or `keras.utils.PyDataset` instance, `y` should not be specified (since targets will be obtained from `x`). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the - form of datasets, generators, or `keras_core.utils.PyDataset` + form of datasets, generators, or `keras.utils.PyDataset` instances (since they generate batches). epochs: Integer. Number of epochs to train the model. 
An epoch is an iteration over the entire `x` and `y` @@ -435,13 +435,13 @@ class Trainer: particularly useful when logged to a file, so `verbose=2` is recommended when not running interactively (e.g., in a production environment). Defaults to `"auto"`. - callbacks: List of `keras_core.callbacks.Callback` instances. + callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. - See `keras_core.callbacks`. Note - `keras_core.callbacks.ProgbarLogger` and - `keras_core.callbacks.History` callbacks are created + See `keras.callbacks`. Note + `keras.callbacks.ProgbarLogger` and + `keras.callbacks.History` callbacks are created automatically and need not be passed to `model.fit()`. - `keras_core.callbacks.ProgbarLogger` is created + `keras.callbacks.ProgbarLogger` is created or not based on the `verbose` argument in `model.fit()`. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. @@ -452,7 +452,7 @@ class Trainer: The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset, generator or - `keras_core.utils.PyDataset` instance. + `keras.utils.PyDataset` instance. If both `validation_data` and `validation_split` are provided, `validation_data` will override `validation_split`. validation_data: Data on which to evaluate @@ -467,7 +467,7 @@ class Trainer: - A tuple `(x_val, y_val, val_sample_weights)` of NumPy arrays. - A `tf.data.Dataset`. - - A Python generator or `keras_core.utils.PyDataset` returning + - A Python generator or `keras.utils.PyDataset` returning `(inputs, targets)` or `(inputs, targets, sample_weights)`. shuffle: Boolean, whether to shuffle the training data before each epoch. This argument is @@ -491,7 +491,7 @@ class Trainer: `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. 
This argument is not supported when `x` is a dataset, generator, - or `keras_core.utils.PyDataset` instance, instead provide the + or `keras.utils.PyDataset` instance, instead provide the sample_weights as the third element of `x`. Note that sample weighting does not apply to metrics specified via the `metrics` argument in `compile()`. To apply sample @@ -528,7 +528,7 @@ class Trainer: Number of samples per validation batch. If unspecified, will default to `batch_size`. Do not specify the `validation_batch_size` if your data is in - the form of datasets or `keras_core.utils.PyDataset` + the form of datasets or `keras.utils.PyDataset` instances (since they generate batches). validation_freq: Only relevant if validation data is provided. Specifies how many training epochs to run @@ -537,7 +537,7 @@ class Trainer: Unpacking behavior for iterator-like inputs: A common pattern is to pass an iterator like object such as a - `tf.data.Dataset` or a `keras_core.utils.PyDataset` to `fit()`, + `tf.data.Dataset` or a `keras.utils.PyDataset` to `fit()`, which will in fact yield not only features (`x`) but optionally targets (`y`) and sample weights (`sample_weight`). Keras requires that the output of such iterator-likes be @@ -596,17 +596,17 @@ class Trainer: - A `tf.data.Dataset`. Should return a tuple of either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - - A generator or `keras_core.utils.PyDataset` returning + - A generator or `keras.utils.PyDataset` returning `(inputs, targets)` or `(inputs, targets, sample_weights)`. y: Target data. Like the input data `x`, it could be either NumPy array(s) or backend-native tensor(s). - If `x` is a `tf.data.Dataset` or `keras_core.utils.PyDataset` + If `x` is a `tf.data.Dataset` or `keras.utils.PyDataset` instance, `y` should not be specified (since targets will be obtained from the iterator/dataset). batch_size: Integer or `None`. Number of samples per batch of computation. 
If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of a - dataset, generators, or `keras_core.utils.PyDataset` instances + dataset, generators, or `keras.utils.PyDataset` instances (since they generate batches). verbose: `"auto"`, 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = single line. @@ -629,7 +629,7 @@ class Trainer: default value of `None`. If `x` is a `tf.data.Dataset` and `steps` is `None`, evaluation will run until the dataset is exhausted. - callbacks: List of `keras_core.callbacks.Callback` instances. + callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during evaluation. return_dict: If `True`, loss and metric results are returned as a dict, with each key being the name of the metric. @@ -671,12 +671,12 @@ class Trainer: - A tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data.Dataset`. - - A `keras_core.utils.PyDataset` instance. + - A `keras.utils.PyDataset` instance. batch_size: Integer or `None`. Number of samples per batch. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the - form of dataset, generators, or `keras_core.utils.PyDataset` + form of dataset, generators, or `keras.utils.PyDataset` instances (since they generate batches). verbose: `"auto"`, 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = single line. @@ -689,7 +689,7 @@ class Trainer: Ignored with the default value of `None`. If `x` is a `tf.data.Dataset` and `steps` is `None`, `predict()` will run until the input dataset is exhausted. - callbacks: List of `keras_core.callbacks.Callback` instances. + callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during prediction. 
Returns: @@ -878,7 +878,7 @@ def resolve_auto_jit_compile(model): def model_supports_jit(model): if platform.system() == "Darwin" and "arm" in platform.processor().lower(): if backend.backend() == "tensorflow": - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf if tf.config.list_physical_devices("GPU"): return False diff --git a/keras_core/trainers/trainer_test.py b/keras/trainers/trainer_test.py similarity index 95% rename from keras_core/trainers/trainer_test.py rename to keras/trainers/trainer_test.py index 10e9705cf..649f7022f 100644 --- a/keras_core/trainers/trainer_test.py +++ b/keras/trainers/trainer_test.py @@ -2,28 +2,28 @@ import numpy as np import pytest from absl.testing import parameterized -import keras_core -from keras_core import backend -from keras_core import initializers -from keras_core import layers -from keras_core import losses -from keras_core import metrics -from keras_core import ops -from keras_core import optimizers -from keras_core import testing -from keras_core.callbacks.callback import Callback -from keras_core.optimizers.rmsprop import RMSprop +import keras +from keras import backend +from keras import initializers +from keras import layers +from keras import losses +from keras import metrics +from keras import ops +from keras import optimizers +from keras import testing +from keras.callbacks.callback import Callback +from keras.optimizers.rmsprop import RMSprop if backend.backend() == "jax": - from keras_core.backend.jax.trainer import JAXTrainer as Trainer + from keras.backend.jax.trainer import JAXTrainer as Trainer elif backend.backend() == "torch": - from keras_core.backend.torch.trainer import TorchTrainer as Trainer + from keras.backend.torch.trainer import TorchTrainer as Trainer elif backend.backend() == "tensorflow": - from keras_core.backend.tensorflow.trainer import ( + from keras.backend.tensorflow.trainer import ( TensorFlowTrainer as Trainer, ) elif 
backend.backend() == "numpy": - from keras_core.backend.numpy.trainer import NumpyTrainer as Trainer + from keras.backend.numpy.trainer import NumpyTrainer as Trainer else: raise ImportError(f"Invalid backend: {backend.backend()}") @@ -563,9 +563,9 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): self.assertAlmostEqual(logs["loss"], 16.0) def test_nested_input_predict(self): - # https://github.com/keras-team/keras-core/issues/325 + # https://github.com/keras-team/keras/issues/325 - class TupleInputModel(keras_core.Model): + class TupleInputModel(keras.Model): def call(self, inputs): a, b = inputs return a + b @@ -575,7 +575,7 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): out = model.predict((x1, x2)) self.assertEqual(out.shape, (3, 4)) - class DictInputModel(keras_core.Model): + class DictInputModel(keras.Model): def call(self, inputs): return inputs["a"] + inputs["b"] @@ -678,7 +678,7 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): self.add_loss(ops.sum(x)) return x - model = keras_core.Sequential( + model = keras.Sequential( [ layers.Dense(2), LossLayer(), @@ -691,14 +691,14 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): model.fit(x, y, batch_size=4) def get_layer(self): - class ExampleLayer(keras_core.Layer): + class ExampleLayer(keras.Layer): def call(self, x): return x * 2 return ExampleLayer def get_model(self): - class ExampleModel(keras_core.Model): + class ExampleModel(keras.Model): def call(self, x): return x * 2 @@ -707,9 +707,9 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): def get_functional(self): ExampleLayer = self.get_layer() - class ExampleFunctional(keras_core.Functional): + class ExampleFunctional(keras.Functional): def __init__(self, input_shape=(None,)): - inputs = keras_core.Input(input_shape) + inputs = keras.Input(input_shape) outputs = ExampleLayer()(inputs) super().__init__(inputs=inputs, outputs=outputs) @@ -733,11 +733,11 @@ class 
TestTrainer(testing.TestCase, parameterized.TestCase): ) @pytest.mark.requires_trainable_backend @pytest.mark.skipif( - keras_core.backend.backend() != "tensorflow", + keras.backend.backend() != "tensorflow", reason="Only tensorflow supports raggeds", ) def test_trainer_with_raggeds(self, model_class): - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf def loss_fn(y, y_pred, sample_weight=None): return 0 @@ -757,7 +757,7 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): self.assertEqual(type(y), tf.RaggedTensor) # test if everything works with the sequential model - model = keras_core.Sequential([model]) + model = keras.Sequential([model]) model.compile(optimizer="adam", loss=loss_fn) model.fit(x, x) y = model.predict(x) @@ -769,7 +769,7 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): inputs = layers.Input((20,)) outputs = layers.Dropout(0.5, seed=1337)(inputs, training=True) - model = keras_core.Model(inputs, outputs) + model = keras.Model(inputs, outputs) out1 = model.predict(np.ones((4, 20)), batch_size=2) self.assertGreater(5, np.sum(np.abs(out1[:2, :] - out1[2:4, :]))) @@ -781,7 +781,7 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): def test_recompile(self): inputs = layers.Input((2,)) outputs = layers.Dense(3)(inputs) - model = keras_core.Model(inputs, outputs) + model = keras.Model(inputs, outputs) model.compile( optimizer="sgd", loss="mse", metrics=["mean_squared_error"] ) @@ -843,10 +843,10 @@ class TestTrainer(testing.TestCase, parameterized.TestCase): # validation only runs for the correct number of steps. 
inputs = layers.Input((2,)) outputs = layers.Dense(3)(inputs) - model = keras_core.Model(inputs, outputs) + model = keras.Model(inputs, outputs) model.compile(optimizer="sgd", loss="mse", metrics=["mse"]) - class Recorder(keras_core.callbacks.Callback): + class Recorder(keras.callbacks.Callback): def __init__(self): self.train_counter = 0 self.val_counter = 0 diff --git a/keras/utils/__init__.py b/keras/utils/__init__.py new file mode 100644 index 000000000..8ab290a98 --- /dev/null +++ b/keras/utils/__init__.py @@ -0,0 +1,26 @@ +from keras.utils.audio_dataset_utils import audio_dataset_from_directory +from keras.utils.dataset_utils import split_dataset +from keras.utils.file_utils import get_file +from keras.utils.image_dataset_utils import image_dataset_from_directory +from keras.utils.image_utils import array_to_img +from keras.utils.image_utils import img_to_array +from keras.utils.image_utils import load_img +from keras.utils.image_utils import save_img +from keras.utils.io_utils import disable_interactive_logging +from keras.utils.io_utils import enable_interactive_logging +from keras.utils.io_utils import is_interactive_logging_enabled +from keras.utils.model_visualization import model_to_dot +from keras.utils.model_visualization import plot_model +from keras.utils.numerical_utils import normalize +from keras.utils.numerical_utils import to_categorical +from keras.utils.progbar import Progbar +from keras.utils.python_utils import default +from keras.utils.python_utils import is_default +from keras.utils.python_utils import removeprefix +from keras.utils.python_utils import removesuffix +from keras.utils.rng_utils import set_random_seed +from keras.utils.sequence_utils import pad_sequences +from keras.utils.text_dataset_utils import text_dataset_from_directory +from keras.utils.timeseries_dataset_utils import ( + timeseries_dataset_from_array, +) diff --git a/keras_core/utils/argument_validation.py b/keras/utils/argument_validation.py similarity index 100% 
rename from keras_core/utils/argument_validation.py rename to keras/utils/argument_validation.py diff --git a/keras_core/utils/audio_dataset_utils.py b/keras/utils/audio_dataset_utils.py similarity index 97% rename from keras_core/utils/audio_dataset_utils.py rename to keras/utils/audio_dataset_utils.py index 170cc7867..54fa928be 100644 --- a/keras_core/utils/audio_dataset_utils.py +++ b/keras/utils/audio_dataset_utils.py @@ -1,14 +1,14 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils import dataset_utils -from keras_core.utils.module_utils import tensorflow as tf -from keras_core.utils.module_utils import tensorflow_io as tfio +from keras.api_export import keras_export +from keras.utils import dataset_utils +from keras.utils.module_utils import tensorflow as tf +from keras.utils.module_utils import tensorflow_io as tfio ALLOWED_FORMATS = (".wav",) -@keras_core_export("keras_core.utils.audio_dataset_from_directory") +@keras_export("keras.utils.audio_dataset_from_directory") def audio_dataset_from_directory( directory, labels="inferred", diff --git a/keras_core/utils/audio_dataset_utils_test.py b/keras/utils/audio_dataset_utils_test.py similarity index 99% rename from keras_core/utils/audio_dataset_utils_test.py rename to keras/utils/audio_dataset_utils_test.py index 5a496b18e..5b5bc55e1 100644 --- a/keras_core/utils/audio_dataset_utils_test.py +++ b/keras/utils/audio_dataset_utils_test.py @@ -2,9 +2,9 @@ import os import numpy as np -from keras_core import testing -from keras_core.utils import audio_dataset_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import testing +from keras.utils import audio_dataset_utils +from keras.utils.module_utils import tensorflow as tf class AudioDatasetFromDirectoryTest(testing.TestCase): diff --git a/keras_core/utils/backend_utils.py b/keras/utils/backend_utils.py similarity index 74% rename from keras_core/utils/backend_utils.py rename to 
keras/utils/backend_utils.py index 101a24b8b..fc42bafb0 100644 --- a/keras_core/utils/backend_utils.py +++ b/keras/utils/backend_utils.py @@ -1,7 +1,7 @@ import sys -from keras_core import backend as backend_module -from keras_core.backend.common import global_state +from keras import backend as backend_module +from keras.backend.common import global_state def in_tf_graph(): @@ -9,7 +9,7 @@ def in_tf_graph(): return True if "tensorflow" in sys.modules: - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf return not tf.executing_eagerly() return False @@ -57,22 +57,22 @@ class DynamicBackend: def __getattr__(self, name): if self._backend == "tensorflow": - from keras_core.backend import tensorflow as tf_backend + from keras.backend import tensorflow as tf_backend return getattr(tf_backend, name) if self._backend == "jax": - from keras_core.backend import jax as jax_backend + from keras.backend import jax as jax_backend return getattr(jax_backend, name) if self._backend == "torch": - from keras_core.backend import torch as torch_backend + from keras.backend import torch as torch_backend return getattr(torch_backend, name) if self._backend == "numpy": # TODO (ariG23498): - # The import `from keras_core.backend import numpy as numpy_backend` + # The import `from keras.backend import numpy as numpy_backend` # is not working. This is a temporary fix. 
- # The import is redirected to `keras_core.backend.numpy.numpy.py` - from keras_core import backend as numpy_backend + # The import is redirected to `keras.backend.numpy.numpy.py` + from keras import backend as numpy_backend return getattr(numpy_backend, name) diff --git a/keras_core/utils/code_stats.py b/keras/utils/code_stats.py similarity index 100% rename from keras_core/utils/code_stats.py rename to keras/utils/code_stats.py diff --git a/keras_core/utils/code_stats_test.py b/keras/utils/code_stats_test.py similarity index 98% rename from keras_core/utils/code_stats_test.py rename to keras/utils/code_stats_test.py index 512303abc..bc1ccec8f 100644 --- a/keras_core/utils/code_stats_test.py +++ b/keras/utils/code_stats_test.py @@ -2,8 +2,8 @@ import os import sys from io import StringIO -from keras_core.testing import test_case -from keras_core.utils.code_stats import count_loc +from keras.testing import test_case +from keras.utils.code_stats import count_loc class TestCountLoc(test_case.TestCase): diff --git a/keras_core/utils/dataset_utils.py b/keras/utils/dataset_utils.py similarity index 98% rename from keras_core/utils/dataset_utils.py rename to keras/utils/dataset_utils.py index 9ad00c895..a35d1de99 100644 --- a/keras_core/utils/dataset_utils.py +++ b/keras/utils/dataset_utils.py @@ -6,12 +6,12 @@ import warnings import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils import io_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras.api_export import keras_export +from keras.utils import io_utils +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.utils.split_dataset") +@keras_export("keras.utils.split_dataset") def split_dataset( dataset, left_size=None, right_size=None, shuffle=False, seed=None ): @@ -42,7 +42,7 @@ def split_dataset( Example: >>> data = np.random.random(size=(1000, 4)) - >>> left_ds, right_ds = keras_core.utils.split_dataset(data, 
left_size=0.8) + >>> left_ds, right_ds = keras.utils.split_dataset(data, left_size=0.8) >>> int(left_ds.cardinality()) 800 >>> int(right_ds.cardinality()) diff --git a/keras_core/utils/dataset_utils_test.py b/keras/utils/dataset_utils_test.py similarity index 98% rename from keras_core/utils/dataset_utils_test.py rename to keras/utils/dataset_utils_test.py index 13939aa44..9d044b192 100644 --- a/keras_core/utils/dataset_utils_test.py +++ b/keras/utils/dataset_utils_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras_core.testing import test_case -from keras_core.utils.dataset_utils import split_dataset -from keras_core.utils.module_utils import tensorflow as tf +from keras.testing import test_case +from keras.utils.dataset_utils import split_dataset +from keras.utils.module_utils import tensorflow as tf class DatasetUtilsTest(test_case.TestCase): diff --git a/keras_core/utils/dtype_utils.py b/keras/utils/dtype_utils.py similarity index 96% rename from keras_core/utils/dtype_utils.py rename to keras/utils/dtype_utils.py index 9c2992f44..cf6f54891 100644 --- a/keras_core/utils/dtype_utils.py +++ b/keras/utils/dtype_utils.py @@ -1,5 +1,5 @@ -from keras_core import backend -from keras_core import ops +from keras import backend +from keras import ops DTYPE_TO_SIZE = { **{f"float{i}": i for i in (16, 32, 64)}, diff --git a/keras_core/utils/dtype_utils_test.py b/keras/utils/dtype_utils_test.py similarity index 97% rename from keras_core/utils/dtype_utils_test.py rename to keras/utils/dtype_utils_test.py index 33cb95b31..29f2ef984 100644 --- a/keras_core/utils/dtype_utils_test.py +++ b/keras/utils/dtype_utils_test.py @@ -1,6 +1,6 @@ -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.testing import test_case -from keras_core.utils import dtype_utils +from keras.backend.common.keras_tensor import KerasTensor +from keras.testing import test_case +from keras.utils import dtype_utils class DtypeSizeTests(test_case.TestCase): diff --git 
a/keras_core/utils/file_utils.py b/keras/utils/file_utils.py similarity index 98% rename from keras_core/utils/file_utils.py rename to keras/utils/file_utils.py index 0c9be9eda..e6963dea2 100644 --- a/keras_core/utils/file_utils.py +++ b/keras/utils/file_utils.py @@ -9,11 +9,11 @@ import warnings import zipfile from urllib.request import urlretrieve -from keras_core.api_export import keras_core_export -from keras_core.backend import config -from keras_core.utils import io_utils -from keras_core.utils.module_utils import gfile -from keras_core.utils.progbar import Progbar +from keras.api_export import keras_export +from keras.backend import config +from keras.utils import io_utils +from keras.utils.module_utils import gfile +from keras.utils.progbar import Progbar def path_to_string(path): @@ -127,7 +127,7 @@ def extract_archive(file_path, path=".", archive_format="auto"): return False -@keras_core_export("keras_core.utils.get_file") +@keras_export("keras.utils.get_file") def get_file( fname=None, origin=None, diff --git a/keras_core/utils/file_utils_test.py b/keras/utils/file_utils_test.py similarity index 99% rename from keras_core/utils/file_utils_test.py rename to keras/utils/file_utils_test.py index 3cb95da0a..214c19de4 100644 --- a/keras_core/utils/file_utils_test.py +++ b/keras/utils/file_utils_test.py @@ -8,8 +8,8 @@ import urllib import zipfile from unittest.mock import patch -from keras_core.testing import test_case -from keras_core.utils import file_utils +from keras.testing import test_case +from keras.utils import file_utils class PathToStringTest(test_case.TestCase): diff --git a/keras_core/utils/image_dataset_utils.py b/keras/utils/image_dataset_utils.py similarity index 96% rename from keras_core/utils/image_dataset_utils.py rename to keras/utils/image_dataset_utils.py index f421d64ed..e643da331 100644 --- a/keras_core/utils/image_dataset_utils.py +++ b/keras/utils/image_dataset_utils.py @@ -1,18 +1,18 @@ import numpy as np -from 
keras_core.api_export import keras_core_export -from keras_core.backend.config import standardize_data_format -from keras_core.utils import dataset_utils -from keras_core.utils import image_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras.api_export import keras_export +from keras.backend.config import standardize_data_format +from keras.utils import dataset_utils +from keras.utils import image_utils +from keras.utils.module_utils import tensorflow as tf ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png") -@keras_core_export( +@keras_export( [ - "keras_core.utils.image_dataset_from_directory", - "keras_core.preprocessing.image_dataset_from_directory", + "keras.utils.image_dataset_from_directory", + "keras.preprocessing.image_dataset_from_directory", ] ) def image_dataset_from_directory( @@ -113,7 +113,7 @@ def image_dataset_from_directory( (of size `image_size`) that matches the target aspect ratio. By default (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved. - data_format: If None uses keras_core.config.image_data_format() + data_format: If None uses keras.config.image_data_format() otherwise either 'channel_last' or 'channel_first'. 
Returns: @@ -387,7 +387,7 @@ def load_image( img, channels=num_channels, expand_animations=False ) if crop_to_aspect_ratio: - from keras_core.backend import tensorflow as tf_backend + from keras.backend import tensorflow as tf_backend img = image_utils.smart_resize( img, diff --git a/keras_core/utils/image_dataset_utils_test.py b/keras/utils/image_dataset_utils_test.py similarity index 98% rename from keras_core/utils/image_dataset_utils_test.py rename to keras/utils/image_dataset_utils_test.py index 6c7fec530..79923aa92 100644 --- a/keras_core/utils/image_dataset_utils_test.py +++ b/keras/utils/image_dataset_utils_test.py @@ -2,10 +2,10 @@ import os import numpy as np -from keras_core import testing -from keras_core.utils import image_dataset_utils -from keras_core.utils import image_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras import testing +from keras.utils import image_dataset_utils +from keras.utils import image_utils +from keras.utils.module_utils import tensorflow as tf class ImageDatasetFromDirectoryTest(testing.TestCase): diff --git a/keras_core/utils/image_utils.py b/keras/utils/image_utils.py similarity index 93% rename from keras_core/utils/image_utils.py rename to keras/utils/image_utils.py index 458b54578..cb4d982ea 100644 --- a/keras_core/utils/image_utils.py +++ b/keras/utils/image_utils.py @@ -6,8 +6,8 @@ import warnings import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export try: from PIL import Image as pil_image @@ -32,10 +32,10 @@ if pil_image_resampling is not None: } -@keras_core_export( +@keras_export( [ - "keras_core.utils.array_to_img", - "keras_core.preprocessing.image.array_to_img", + "keras.utils.array_to_img", + "keras.preprocessing.image.array_to_img", ] ) def array_to_img(x, data_format=None, scale=True, dtype=None): @@ -46,19 +46,19 @@ def array_to_img(x, data_format=None, 
scale=True, dtype=None): ```python from PIL import Image img = np.random.random(size=(100, 100, 3)) - pil_img = keras_core.utils.array_to_img(img) + pil_img = keras.utils.array_to_img(img) ``` Args: x: Input data, in any form that can be converted to a NumPy array. data_format: Image data format, can be either `"channels_first"` or `"channels_last"`. Defaults to `None`, in which case the global - setting `keras_core.backend.image_data_format()` is used (unless you + setting `keras.backend.image_data_format()` is used (unless you changed it, it defaults to `"channels_last"`). scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Defaults to `True`. dtype: Dtype to use. `None` means the global setting - `keras_core.backend.floatx()` is used (unless you changed it, it + `keras.backend.floatx()` is used (unless you changed it, it defaults to `"float32"`). Defaults to `None`. Returns: @@ -107,10 +107,10 @@ def array_to_img(x, data_format=None, scale=True, dtype=None): raise ValueError(f"Unsupported channel number: {x.shape[2]}") -@keras_core_export( +@keras_export( [ - "keras_core.utils.img_to_array", - "keras_core.preprocessing.image.img_to_array", + "keras.utils.img_to_array", + "keras.preprocessing.image.img_to_array", ] ) def img_to_array(img, data_format=None, dtype=None): @@ -121,18 +121,18 @@ def img_to_array(img, data_format=None, dtype=None): ```python from PIL import Image img_data = np.random.random(size=(100, 100, 3)) - img = keras_core.utils.array_to_img(img_data) - array = keras_core.utils.image.img_to_array(img) + img = keras.utils.array_to_img(img_data) + array = keras.utils.image.img_to_array(img) ``` Args: img: Input PIL Image instance. data_format: Image data format, can be either `"channels_first"` or `"channels_last"`. 
Defaults to `None`, in which case the global - setting `keras_core.backend.image_data_format()` is used (unless you + setting `keras.backend.image_data_format()` is used (unless you changed it, it defaults to `"channels_last"`). dtype: Dtype to use. `None` means the global setting - `keras_core.backend.floatx()` is used (unless you changed it, it + `keras.backend.floatx()` is used (unless you changed it, it defaults to `"float32"`). Returns: @@ -159,8 +159,8 @@ def img_to_array(img, data_format=None, dtype=None): return x -@keras_core_export( - ["keras_core.utils.save_img", "keras_core.preprocessing.image.save_img"] +@keras_export( + ["keras.utils.save_img", "keras.preprocessing.image.save_img"] ) def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): """Saves an image stored as a NumPy array to a path or file object. @@ -186,8 +186,8 @@ def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): img.save(path, format=file_format, **kwargs) -@keras_core_export( - ["keras_core.utils.load_img", "keras_core.preprocessing.image.load_img"] +@keras_export( + ["keras.utils.load_img", "keras.preprocessing.image.load_img"] ) def load_img( path, @@ -201,8 +201,8 @@ def load_img( Usage: ```python - image = keras_core.utils.load_img(image_path) - input_arr = keras_core.utils.img_to_array(image) + image = keras.utils.load_img(image_path) + input_arr = keras.utils.img_to_array(image) input_arr = np.array([input_arr]) # Convert single image to a batch. 
predictions = model.predict(input_arr) ``` diff --git a/keras_core/utils/io_utils.py b/keras/utils/io_utils.py similarity index 85% rename from keras_core/utils/io_utils.py rename to keras/utils/io_utils.py index c98160fbc..3d848742b 100644 --- a/keras_core/utils/io_utils.py +++ b/keras/utils/io_utils.py @@ -2,14 +2,14 @@ import sys from absl import logging -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras.api_export import keras_export +from keras.backend.common import global_state -@keras_core_export( +@keras_export( [ - "keras_core.config.enable_interactive_logging", - "keras_core.utils.enable_interactive_logging", + "keras.config.enable_interactive_logging", + "keras.utils.enable_interactive_logging", ] ) def enable_interactive_logging(): @@ -22,10 +22,10 @@ def enable_interactive_logging(): global_state.set_global_attribute("interactive_logging", True) -@keras_core_export( +@keras_export( [ - "keras_core.config.disable_interactive_logging", - "keras_core.utils.disable_interactive_logging", + "keras.config.disable_interactive_logging", + "keras.utils.disable_interactive_logging", ] ) def disable_interactive_logging(): @@ -38,10 +38,10 @@ def disable_interactive_logging(): global_state.set_global_attribute("interactive_logging", False) -@keras_core_export( +@keras_export( [ - "keras_core.config.is_interactive_logging_enabled", - "keras_core.utils.is_interactive_logging_enabled", + "keras.config.is_interactive_logging_enabled", + "keras.utils.is_interactive_logging_enabled", ] ) def is_interactive_logging_enabled(): diff --git a/keras_core/utils/io_utils_test.py b/keras/utils/io_utils_test.py similarity index 96% rename from keras_core/utils/io_utils_test.py rename to keras/utils/io_utils_test.py index bf57f5ca2..20eb2add5 100644 --- a/keras_core/utils/io_utils_test.py +++ b/keras/utils/io_utils_test.py @@ -1,7 +1,7 @@ from unittest.mock import patch -from keras_core.testing import test_case -from 
keras_core.utils import io_utils +from keras.testing import test_case +from keras.utils import io_utils class TestIoUtils(test_case.TestCase): diff --git a/keras_core/utils/jax_utils.py b/keras/utils/jax_utils.py similarity index 86% rename from keras_core/utils/jax_utils.py rename to keras/utils/jax_utils.py index 3bb15cc5f..1c79258bf 100644 --- a/keras_core/utils/jax_utils.py +++ b/keras/utils/jax_utils.py @@ -1,4 +1,4 @@ -from keras_core import backend +from keras import backend def is_in_jax_tracing_scope(): diff --git a/keras_core/utils/model_visualization.py b/keras/utils/model_visualization.py similarity index 96% rename from keras_core/utils/model_visualization.py rename to keras/utils/model_visualization.py index 922e68cc4..287facea0 100644 --- a/keras_core/utils/model_visualization.py +++ b/keras/utils/model_visualization.py @@ -3,8 +3,8 @@ import os import sys -from keras_core.api_export import keras_core_export -from keras_core.utils import io_utils +from keras.api_export import keras_export +from keras.utils import io_utils try: # pydot-ng is a fork of pydot that is better maintained. @@ -164,7 +164,7 @@ def make_node(layer, **kwargs): return node -@keras_core_export("keras_core.utils.model_to_dot") +@keras_export("keras.utils.model_to_dot") def model_to_dot( model, show_shapes=False, @@ -201,7 +201,7 @@ def model_to_dot( a `pydot.Cluster` instance representing nested model if `subgraph=True`. """ - from keras_core.ops.function import make_node_key + from keras.ops.function import make_node_key if not model.built: raise ValueError( @@ -210,10 +210,10 @@ def model_to_dot( "the model on a batch of data." 
) - from keras_core.models import functional - from keras_core.models import sequential + from keras.models import functional + from keras.models import sequential - # from keras_core.layers import Wrapper + # from keras.layers import Wrapper if not check_pydot(): raise ImportError( @@ -336,7 +336,7 @@ def model_to_dot( return dot -@keras_core_export("keras_core.utils.plot_model") +@keras_export("keras.utils.plot_model") def plot_model( model, to_file="model.png", @@ -357,10 +357,10 @@ def plot_model( ```python inputs = ... outputs = ... - model = keras_core.Model(inputs=inputs, outputs=outputs) + model = keras.Model(inputs=inputs, outputs=outputs) dot_img_file = '/tmp/model_1.png' - keras_core.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) + keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) ``` Args: diff --git a/keras_core/utils/module_utils.py b/keras/utils/module_utils.py similarity index 100% rename from keras_core/utils/module_utils.py rename to keras/utils/module_utils.py diff --git a/keras_core/utils/naming.py b/keras/utils/naming.py similarity index 91% rename from keras_core/utils/naming.py rename to keras/utils/naming.py index 2a7e21060..b16f429fc 100644 --- a/keras_core/utils/naming.py +++ b/keras/utils/naming.py @@ -1,8 +1,8 @@ import collections import re -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras.api_export import keras_export +from keras.backend.common import global_state def auto_name(prefix): @@ -31,7 +31,7 @@ def to_snake_case(name): return name -@keras_core_export("keras_core.backend.get_uid") +@keras_export("keras.backend.get_uid") def get_uid(prefix=""): """Associates a string prefix with an integer counter. 
diff --git a/keras_core/utils/naming_test.py b/keras/utils/naming_test.py similarity index 98% rename from keras_core/utils/naming_test.py rename to keras/utils/naming_test.py index 40e9fa8fe..c5b0752a1 100644 --- a/keras_core/utils/naming_test.py +++ b/keras/utils/naming_test.py @@ -1,5 +1,5 @@ -from keras_core.testing import test_case -from keras_core.utils import naming +from keras.testing import test_case +from keras.utils import naming class NamingUtilsTest(test_case.TestCase): diff --git a/keras_core/utils/nest.py b/keras/utils/nest.py similarity index 100% rename from keras_core/utils/nest.py rename to keras/utils/nest.py diff --git a/keras_core/utils/numerical_utils.py b/keras/utils/numerical_utils.py similarity index 91% rename from keras_core/utils/numerical_utils.py rename to keras/utils/numerical_utils.py index fa515f345..cf5266ffd 100644 --- a/keras_core/utils/numerical_utils.py +++ b/keras/utils/numerical_utils.py @@ -1,10 +1,10 @@ import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export +from keras import backend +from keras.api_export import keras_export -@keras_core_export("keras_core.utils.normalize") +@keras_export("keras.utils.normalize") def normalize(x, axis=-1, order=2): """Normalizes an array. @@ -19,7 +19,7 @@ def normalize(x, axis=-1, order=2): Returns: A normalized copy of the array. """ - from keras_core import ops + from keras import ops if not isinstance(order, int) or not order >= 1: raise ValueError( @@ -49,7 +49,7 @@ def normalize(x, axis=-1, order=2): return ops.multiply(x, norm) -@keras_core_export("keras_core.utils.to_categorical") +@keras_export("keras.utils.to_categorical") def to_categorical(x, num_classes=None): """Converts a class vector (integers) to binary class matrix. 
@@ -67,7 +67,7 @@ def to_categorical(x, num_classes=None): Example: - >>> a = keras_core.utils.to_categorical([0, 1, 2, 3], num_classes=4) + >>> a = keras.utils.to_categorical([0, 1, 2, 3], num_classes=4) >>> print(a) [[1. 0. 0. 0.] [0. 1. 0. 0.] @@ -79,11 +79,11 @@ def to_categorical(x, num_classes=None): ... .04, .01, .94, .05, ... .12, .21, .5, .17], ... shape=[4, 4]) - >>> loss = keras_core.backend.categorical_crossentropy(a, b) + >>> loss = keras.backend.categorical_crossentropy(a, b) >>> print(np.around(loss, 5)) [0.10536 0.82807 0.1011 1.77196] - >>> loss = keras_core.backend.categorical_crossentropy(a, a) + >>> loss = keras.backend.categorical_crossentropy(a, a) >>> print(np.around(loss, 5)) [0. 0. 0. 0.] """ diff --git a/keras_core/utils/numerical_utils_test.py b/keras/utils/numerical_utils_test.py similarity index 95% rename from keras_core/utils/numerical_utils_test.py rename to keras/utils/numerical_utils_test.py index e8c7ed29e..d4cc7de57 100644 --- a/keras_core/utils/numerical_utils_test.py +++ b/keras/utils/numerical_utils_test.py @@ -1,9 +1,9 @@ import numpy as np from absl.testing import parameterized -from keras_core import backend -from keras_core import testing -from keras_core.utils import numerical_utils +from keras import backend +from keras import testing +from keras.utils import numerical_utils NUM_CLASSES = 5 diff --git a/keras_core/utils/progbar.py b/keras/utils/progbar.py similarity index 98% rename from keras_core/utils/progbar.py rename to keras/utils/progbar.py index 97c59047f..554eb0068 100644 --- a/keras_core/utils/progbar.py +++ b/keras/utils/progbar.py @@ -3,12 +3,12 @@ import os import sys import time -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.utils import io_utils +from keras import backend +from keras.api_export import keras_export +from keras.utils import io_utils -@keras_core_export("keras_core.utils.Progbar") +@keras_export("keras.utils.Progbar") class Progbar: 
"""Displays a progress bar. diff --git a/keras_core/utils/python_utils.py b/keras/utils/python_utils.py similarity index 100% rename from keras_core/utils/python_utils.py rename to keras/utils/python_utils.py diff --git a/keras_core/utils/python_utils_test.py b/keras/utils/python_utils_test.py similarity index 97% rename from keras_core/utils/python_utils_test.py rename to keras/utils/python_utils_test.py index 24bdf3119..33d7e72e3 100644 --- a/keras_core/utils/python_utils_test.py +++ b/keras/utils/python_utils_test.py @@ -1,8 +1,8 @@ import base64 import marshal -from keras_core import testing -from keras_core.utils import python_utils +from keras import testing +from keras.utils import python_utils class PythonUtilsTest(testing.TestCase): diff --git a/keras_core/utils/rng_utils.py b/keras/utils/rng_utils.py similarity index 83% rename from keras_core/utils/rng_utils.py rename to keras/utils/rng_utils.py index 2b1f0ec7d..141a47bb1 100644 --- a/keras_core/utils/rng_utils.py +++ b/keras/utils/rng_utils.py @@ -2,12 +2,12 @@ import random import numpy as np -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.utils.module_utils import tensorflow as tf +from keras import backend +from keras.api_export import keras_export +from keras.utils.module_utils import tensorflow as tf -@keras_core_export("keras_core.utils.set_random_seed") +@keras_export("keras.utils.set_random_seed") def set_random_seed(seed): """Sets all random seeds (Python, NumPy, and backend framework, e.g. TF). 
@@ -22,7 +22,7 @@ def set_random_seed(seed): ```python import random import numpy as np - from keras_core.utils.module_utils import tensorflow as tf + from keras.utils.module_utils import tensorflow as tf random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) diff --git a/keras_core/utils/rng_utils_test.py b/keras/utils/rng_utils_test.py similarity index 69% rename from keras_core/utils/rng_utils_test.py rename to keras/utils/rng_utils_test.py index 534207c1b..f6ec741c4 100644 --- a/keras_core/utils/rng_utils_test.py +++ b/keras/utils/rng_utils_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest import tensorflow as tf -import keras_core -from keras_core import backend -from keras_core.testing import test_case -from keras_core.utils import rng_utils +import keras +from keras import backend +from keras.testing import test_case +from keras.utils import rng_utils class TestRandomSeedSetting(test_case.TestCase): @@ -15,11 +15,11 @@ class TestRandomSeedSetting(test_case.TestCase): ) def test_set_random_seed(self): def get_model_output(): - model = keras_core.Sequential( + model = keras.Sequential( [ - keras_core.layers.Dense(10), - keras_core.layers.Dropout(0.5), - keras_core.layers.Dense(10), + keras.layers.Dense(10), + keras.layers.Dropout(0.5), + keras.layers.Dense(10), ] ) x = np.random.random((32, 10)).astype("float32") diff --git a/keras_core/utils/sequence_utils.py b/keras/utils/sequence_utils.py similarity index 91% rename from keras_core/utils/sequence_utils.py rename to keras/utils/sequence_utils.py index 18e4830ec..5ba2903fe 100644 --- a/keras_core/utils/sequence_utils.py +++ b/keras/utils/sequence_utils.py @@ -1,12 +1,12 @@ import numpy as np -from keras_core.api_export import keras_core_export +from keras.api_export import keras_export -@keras_core_export( +@keras_export( [ - "keras_core.utils.pad_sequences", - "keras_core.preprocessing.sequence.pad_sequences", + "keras.utils.pad_sequences", + "keras.preprocessing.sequence.pad_sequences", ] 
) def pad_sequences( @@ -37,22 +37,22 @@ def pad_sequences( default. >>> sequence = [[1], [2, 3], [4, 5, 6]] - >>> keras_core.utils.pad_sequences(sequence) + >>> keras.utils.pad_sequences(sequence) array([[0, 0, 1], [0, 2, 3], [4, 5, 6]], dtype=int32) - >>> keras_core.utils.pad_sequences(sequence, value=-1) + >>> keras.utils.pad_sequences(sequence, value=-1) array([[-1, -1, 1], [-1, 2, 3], [ 4, 5, 6]], dtype=int32) - >>> keras_core.utils.pad_sequences(sequence, padding='post') + >>> keras.utils.pad_sequences(sequence, padding='post') array([[1, 0, 0], [2, 3, 0], [4, 5, 6]], dtype=int32) - >>> keras_core.utils.pad_sequences(sequence, maxlen=2) + >>> keras.utils.pad_sequences(sequence, maxlen=2) array([[0, 1], [2, 3], [5, 6]], dtype=int32) diff --git a/keras_core/utils/sequence_utils_test.py b/keras/utils/sequence_utils_test.py similarity index 97% rename from keras_core/utils/sequence_utils_test.py rename to keras/utils/sequence_utils_test.py index 27a20f87c..64cb2b9b0 100644 --- a/keras_core/utils/sequence_utils_test.py +++ b/keras/utils/sequence_utils_test.py @@ -1,5 +1,5 @@ -from keras_core import testing -from keras_core.utils import sequence_utils +from keras import testing +from keras.utils import sequence_utils class PadSequencesTest(testing.TestCase): diff --git a/keras_core/utils/shape_utils.py b/keras/utils/shape_utils.py similarity index 100% rename from keras_core/utils/shape_utils.py rename to keras/utils/shape_utils.py diff --git a/keras_core/utils/summary_utils.py b/keras/utils/summary_utils.py similarity index 98% rename from keras_core/utils/summary_utils.py rename to keras/utils/summary_utils.py index 06259fb4c..0dae07421 100644 --- a/keras_core/utils/summary_utils.py +++ b/keras/utils/summary_utils.py @@ -6,14 +6,14 @@ import rich import rich.console import rich.markup -# See https://github.com/keras-team/keras-core/issues/448 +# See https://github.com/keras-team/keras/issues/448 # for below imports import rich.table import tree -from keras_core 
import backend -from keras_core.utils import dtype_utils -from keras_core.utils import io_utils +from keras import backend +from keras.utils import dtype_utils +from keras.utils import io_utils def count_params(weights): @@ -131,8 +131,8 @@ def print_summary( matches `layer_range[1]`. By default (`None`) all layers in the model are included in the summary. """ - from keras_core.models import Functional - from keras_core.models import Sequential + from keras.models import Functional + from keras.models import Sequential if not print_fn and not io_utils.is_interactive_logging_enabled(): print_fn = io_utils.print_msg diff --git a/keras_core/utils/summary_utils_test.py b/keras/utils/summary_utils_test.py similarity index 92% rename from keras_core/utils/summary_utils_test.py rename to keras/utils/summary_utils_test.py index 4570fceca..51f764a74 100644 --- a/keras_core/utils/summary_utils_test.py +++ b/keras/utils/summary_utils_test.py @@ -2,10 +2,10 @@ import numpy as np import pytest from absl.testing import parameterized -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.utils import summary_utils +from keras import layers +from keras import models +from keras import testing +from keras.utils import summary_utils class SummaryUtilsTest(testing.TestCase, parameterized.TestCase): diff --git a/keras_core/utils/text_dataset_utils.py b/keras/utils/text_dataset_utils.py similarity index 97% rename from keras_core/utils/text_dataset_utils.py rename to keras/utils/text_dataset_utils.py index cc641fc6a..d418754df 100644 --- a/keras_core/utils/text_dataset_utils.py +++ b/keras/utils/text_dataset_utils.py @@ -1,14 +1,14 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils import dataset_utils -from keras_core.utils.module_utils import tensorflow as tf +from keras.api_export import keras_export +from keras.utils import dataset_utils +from keras.utils.module_utils import 
tensorflow as tf -@keras_core_export( +@keras_export( [ - "keras_core.utils.text_dataset_from_directory", - "keras_core.preprocessing.text_dataset_from_directory", + "keras.utils.text_dataset_from_directory", + "keras.preprocessing.text_dataset_from_directory", ] ) def text_dataset_from_directory( diff --git a/keras_core/utils/text_dataset_utils_test.py b/keras/utils/text_dataset_utils_test.py similarity index 99% rename from keras_core/utils/text_dataset_utils_test.py rename to keras/utils/text_dataset_utils_test.py index ec38c2fec..224b659de 100644 --- a/keras_core/utils/text_dataset_utils_test.py +++ b/keras/utils/text_dataset_utils_test.py @@ -2,8 +2,8 @@ import os import random import string -from keras_core import testing -from keras_core.utils import text_dataset_utils +from keras import testing +from keras.utils import text_dataset_utils class TextDatasetFromDirectoryTest(testing.TestCase): diff --git a/keras_core/utils/tf_utils.py b/keras/utils/tf_utils.py similarity index 98% rename from keras_core/utils/tf_utils.py rename to keras/utils/tf_utils.py index 3797da507..5a622a3a0 100644 --- a/keras_core/utils/tf_utils.py +++ b/keras/utils/tf_utils.py @@ -1,4 +1,4 @@ -from keras_core.utils.module_utils import tensorflow as tf +from keras.utils.module_utils import tensorflow as tf def expand_dims(inputs, axis): diff --git a/keras_core/utils/timeseries_dataset_utils.py b/keras/utils/timeseries_dataset_utils.py similarity index 97% rename from keras_core/utils/timeseries_dataset_utils.py rename to keras/utils/timeseries_dataset_utils.py index d98d90684..c0a60482f 100644 --- a/keras_core/utils/timeseries_dataset_utils.py +++ b/keras/utils/timeseries_dataset_utils.py @@ -1,13 +1,13 @@ import numpy as np -from keras_core.api_export import keras_core_export -from keras_core.utils.module_utils import tensorflow as tf +from keras.api_export import keras_export +from keras.utils.module_utils import tensorflow as tf -@keras_core_export( +@keras_export( [ - 
"keras_core.utils.timeseries_dataset_from_array", - "keras_core.preprocessing.timeseries_dataset_from_array", + "keras.utils.timeseries_dataset_from_array", + "keras.preprocessing.timeseries_dataset_from_array", ] ) def timeseries_dataset_from_array( diff --git a/keras_core/utils/timeseries_dataset_utils_test.py b/keras/utils/timeseries_dataset_utils_test.py similarity index 98% rename from keras_core/utils/timeseries_dataset_utils_test.py rename to keras/utils/timeseries_dataset_utils_test.py index b5eca0960..a68ee36ae 100644 --- a/keras_core/utils/timeseries_dataset_utils_test.py +++ b/keras/utils/timeseries_dataset_utils_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras_core import testing -from keras_core.utils import timeseries_dataset_utils +from keras import testing +from keras.utils import timeseries_dataset_utils class TimeseriesDatasetTest(testing.TestCase): diff --git a/keras_core/utils/torch_utils.py b/keras/utils/torch_utils.py similarity index 89% rename from keras_core/utils/torch_utils.py rename to keras/utils/torch_utils.py index 3adcf7314..eb170eeec 100644 --- a/keras_core/utils/torch_utils.py +++ b/keras/utils/torch_utils.py @@ -1,8 +1,8 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers import Layer +from keras.api_export import keras_export +from keras.layers import Layer -@keras_core_export("keras_core.layers.TorchModuleWrapper") +@keras_export("keras.layers.TorchModuleWrapper") class TorchModuleWrapper(Layer): """Torch module wrapper layer. 
@@ -26,10 +26,10 @@ class TorchModuleWrapper(Layer): import torch.nn as nn import torch.nn.functional as F - import keras_core - from keras_core.layers import TorchModuleWrapper + import keras + from keras.layers import TorchModuleWrapper - class Classifier(keras_core.Model): + class Classifier(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Wrap `torch.nn.Module`s with `TorchModuleWrapper` @@ -83,7 +83,7 @@ class TorchModuleWrapper(Layer): f"Received uninitialized LazyModule: module={module}" ) - from keras_core.backend.torch.core import get_device + from keras.backend.torch.core import get_device self.module = module.to(get_device()) self._track_module_parameters() @@ -92,7 +92,7 @@ class TorchModuleWrapper(Layer): return self.module.parameters(recurse=recurse) def _track_module_parameters(self): - from keras_core.backend.torch import Variable + from keras.backend.torch import Variable for param in self.module.parameters(): variable = Variable( diff --git a/keras_core/utils/torch_utils_test.py b/keras/utils/torch_utils_test.py similarity index 90% rename from keras_core/utils/torch_utils_test.py rename to keras/utils/torch_utils_test.py index 606128b77..986961866 100644 --- a/keras_core/utils/torch_utils_test.py +++ b/keras/utils/torch_utils_test.py @@ -2,11 +2,11 @@ import numpy as np import pytest import torch -from keras_core import backend -from keras_core import layers -from keras_core import models -from keras_core import testing -from keras_core.utils.torch_utils import TorchModuleWrapper +from keras import backend +from keras import layers +from keras import models +from keras import testing +from keras.utils.torch_utils import TorchModuleWrapper class Classifier(models.Model): diff --git a/keras_core/utils/traceback_utils.py b/keras/utils/traceback_utils.py similarity index 87% rename from keras_core/utils/traceback_utils.py rename to keras/utils/traceback_utils.py index 8e017c14a..037f2833d 100644 --- 
a/keras_core/utils/traceback_utils.py +++ b/keras/utils/traceback_utils.py @@ -6,9 +6,9 @@ from functools import wraps import tree -from keras_core import backend -from keras_core.api_export import keras_core_export -from keras_core.backend.common import global_state +from keras import backend +from keras.api_export import keras_export +from keras.backend.common import global_state _EXCLUDED_PATHS = ( os.path.abspath(os.path.join(__file__, "..", "..")), @@ -16,7 +16,7 @@ _EXCLUDED_PATHS = ( ) -@keras_core_export("keras_core.config.enable_traceback_filtering") +@keras_export("keras.config.enable_traceback_filtering") def enable_traceback_filtering(): """Turn on traceback filtering. @@ -27,17 +27,17 @@ def enable_traceback_filtering(): raises, to keep traceback short, readable, and focused on what's actionable for you (your own code). - See also `keras_core.config.disable_traceback_filtering()` and - `keras_core.config.is_traceback_filtering_enabled()`. + See also `keras.config.disable_traceback_filtering()` and + `keras.config.is_traceback_filtering_enabled()`. If you have previously disabled traceback filtering via - `keras_core.config.disable_traceback_filtering()`, you can re-enable it via - `keras_core.config.enable_traceback_filtering()`. + `keras.config.disable_traceback_filtering()`, you can re-enable it via + `keras.config.enable_traceback_filtering()`. """ global_state.set_global_attribute("traceback_filtering", True) -@keras_core_export("keras_core.config.disable_traceback_filtering") +@keras_export("keras.config.disable_traceback_filtering") def disable_traceback_filtering(): """Turn off traceback filtering. @@ -48,17 +48,17 @@ def disable_traceback_filtering(): raises, to keep traceback short, readable, and focused on what's actionable for you (your own code). - See also `keras_core.config.enable_traceback_filtering()` and - `keras_core.config.is_traceback_filtering_enabled()`. 
+ See also `keras.config.enable_traceback_filtering()` and + `keras.config.is_traceback_filtering_enabled()`. If you have previously disabled traceback filtering via - `keras_core.config.disable_traceback_filtering()`, you can re-enable it via - `keras_core.config.enable_traceback_filtering()`. + `keras.config.disable_traceback_filtering()`, you can re-enable it via + `keras.config.enable_traceback_filtering()`. """ global_state.set_global_attribute("traceback_filtering", False) -@keras_core_export("keras_core.config.is_traceback_filtering_enabled") +@keras_export("keras.config.is_traceback_filtering_enabled") def is_traceback_filtering_enabled(): """Check if traceback filtering is enabled. @@ -69,12 +69,12 @@ def is_traceback_filtering_enabled(): raises, to keep traceback short, readable, and focused on what's actionable for you (your own code). - See also `keras_core.config.enable_traceback_filtering()` and - `keras_core.config.disable_traceback_filtering()`. + See also `keras.config.enable_traceback_filtering()` and + `keras.config.disable_traceback_filtering()`. If you have previously disabled traceback filtering via - `keras_core.config.disable_traceback_filtering()`, you can re-enable it via - `keras_core.config.enable_traceback_filtering()`. + `keras.config.disable_traceback_filtering()`, you can re-enable it via + `keras.config.enable_traceback_filtering()`. 
Returns: Boolean, `True` if traceback filtering is enabled, @@ -119,7 +119,7 @@ def filter_traceback(fn): except Exception as e: filtered_tb = _process_traceback_frames(e.__traceback__) # To get the full stack trace, call: - # `keras_core.config.disable_traceback_filtering()` + # `keras.config.disable_traceback_filtering()` raise e.with_traceback(filtered_tb) from None finally: del filtered_tb diff --git a/keras_core/utils/tracking.py b/keras/utils/tracking.py similarity index 97% rename from keras_core/utils/tracking.py rename to keras/utils/tracking.py index 37cde722e..67c6b2f02 100644 --- a/keras_core/utils/tracking.py +++ b/keras/utils/tracking.py @@ -1,7 +1,7 @@ from functools import wraps -from keras_core.backend.common.global_state import get_global_attribute -from keras_core.backend.common.global_state import set_global_attribute +from keras.backend.common.global_state import get_global_attribute +from keras.backend.common.global_state import set_global_attribute class DotNotTrackScope: diff --git a/keras_core/utils/tracking_test.py b/keras/utils/tracking_test.py similarity index 100% rename from keras_core/utils/tracking_test.py rename to keras/utils/tracking_test.py diff --git a/keras_core/version.py b/keras/version.py similarity index 100% rename from keras_core/version.py rename to keras/version.py diff --git a/keras_core/__init__.py b/keras_core/__init__.py deleted file mode 100644 index 343ff36f3..000000000 --- a/keras_core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from keras_core import activations -from keras_core import applications -from keras_core import backend -from keras_core import constraints -from keras_core import datasets -from keras_core import initializers -from keras_core import layers -from keras_core import models -from keras_core import ops -from keras_core import optimizers -from keras_core import regularizers -from keras_core import utils -from keras_core.backend import KerasTensor -from keras_core.layers import Input -from 
keras_core.layers import Layer -from keras_core.models import Functional -from keras_core.models import Model -from keras_core.models import Sequential -from keras_core.version import __version__ diff --git a/keras_core/backend/__init__.py b/keras_core/backend/__init__.py deleted file mode 100644 index 2058937a2..000000000 --- a/keras_core/backend/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -from keras_core.backend.config import backend - -if backend() == "torch": - # When using the torch backend, - # torch needs to be imported first, otherwise it will segfault - # upon import. - import torch - -from keras_core.backend.common.keras_tensor import KerasTensor -from keras_core.backend.common.keras_tensor import any_symbolic_tensors -from keras_core.backend.common.keras_tensor import is_keras_tensor -from keras_core.backend.common.name_scope import name_scope -from keras_core.backend.common.stateless_scope import StatelessScope -from keras_core.backend.common.stateless_scope import get_stateless_scope -from keras_core.backend.common.stateless_scope import in_stateless_scope -from keras_core.backend.common.variables import AutocastScope -from keras_core.backend.common.variables import get_autocast_scope -from keras_core.backend.common.variables import is_float_dtype -from keras_core.backend.common.variables import is_int_dtype -from keras_core.backend.common.variables import standardize_dtype -from keras_core.backend.common.variables import standardize_shape -from keras_core.backend.config import epsilon -from keras_core.backend.config import floatx -from keras_core.backend.config import image_data_format -from keras_core.backend.config import set_epsilon -from keras_core.backend.config import set_floatx -from keras_core.backend.config import set_image_data_format -from keras_core.backend.config import standardize_data_format -from keras_core.utils.io_utils import print_msg - -# Import backend functions. 
-if backend() == "tensorflow": - print_msg("Using TensorFlow backend") - from keras_core.backend.tensorflow import * # noqa: F403 - - distribution_lib = None -elif backend() == "jax": - print_msg("Using JAX backend.") - from keras_core.backend.jax import * # noqa: F403 -elif backend() == "torch": - print_msg("Using PyTorch backend.") - from keras_core.backend.torch import * # noqa: F403 - - distribution_lib = None -elif backend() == "numpy": - print_msg( - "Using NumPy backend.\nThe NumPy backend does not support " - "training. It should only be used for inference, evaluation, " - "and debugging." - ) - from keras_core.backend.numpy import * # noqa: F403 - - distribution_lib = None -else: - raise ValueError(f"Unable to import backend : {backend()}") diff --git a/keras_core/backend/common/__init__.py b/keras_core/backend/common/__init__.py deleted file mode 100644 index a28c7e86e..000000000 --- a/keras_core/backend/common/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from keras_core.backend.common import backend_utils -from keras_core.backend.common.variables import AutocastScope -from keras_core.backend.common.variables import KerasVariable -from keras_core.backend.common.variables import get_autocast_scope -from keras_core.backend.common.variables import is_float_dtype -from keras_core.backend.common.variables import is_int_dtype -from keras_core.backend.common.variables import standardize_dtype -from keras_core.backend.common.variables import standardize_shape -from keras_core.random import random diff --git a/keras_core/backend/jax/__init__.py b/keras_core/backend/jax/__init__.py deleted file mode 100644 index 260081beb..000000000 --- a/keras_core/backend/jax/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from keras_core.backend.jax import core -from keras_core.backend.jax import distribution_lib -from keras_core.backend.jax import image -from keras_core.backend.jax import math -from keras_core.backend.jax import nn -from keras_core.backend.jax import numpy -from 
keras_core.backend.jax import random -from keras_core.backend.jax.core import SUPPORTS_SPARSE_TENSORS -from keras_core.backend.jax.core import Variable -from keras_core.backend.jax.core import cast -from keras_core.backend.jax.core import compute_output_spec -from keras_core.backend.jax.core import cond -from keras_core.backend.jax.core import convert_to_numpy -from keras_core.backend.jax.core import convert_to_tensor -from keras_core.backend.jax.core import is_tensor -from keras_core.backend.jax.core import scatter -from keras_core.backend.jax.core import shape -from keras_core.backend.jax.core import stop_gradient -from keras_core.backend.jax.core import vectorized_map -from keras_core.backend.jax.rnn import cudnn_ok -from keras_core.backend.jax.rnn import gru -from keras_core.backend.jax.rnn import lstm -from keras_core.backend.jax.rnn import rnn diff --git a/keras_core/backend/numpy/__init__.py b/keras_core/backend/numpy/__init__.py deleted file mode 100644 index 82d22bb26..000000000 --- a/keras_core/backend/numpy/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from keras_core.backend.numpy import core -from keras_core.backend.numpy import image -from keras_core.backend.numpy import math -from keras_core.backend.numpy import nn -from keras_core.backend.numpy import numpy -from keras_core.backend.numpy import random -from keras_core.backend.numpy.core import SUPPORTS_SPARSE_TENSORS -from keras_core.backend.numpy.core import Variable -from keras_core.backend.numpy.core import cast -from keras_core.backend.numpy.core import compute_output_spec -from keras_core.backend.numpy.core import cond -from keras_core.backend.numpy.core import convert_to_numpy -from keras_core.backend.numpy.core import convert_to_tensor -from keras_core.backend.numpy.core import is_tensor -from keras_core.backend.numpy.core import shape -from keras_core.backend.numpy.core import vectorized_map -from keras_core.backend.numpy.rnn import cudnn_ok -from keras_core.backend.numpy.rnn import gru -from 
keras_core.backend.numpy.rnn import lstm -from keras_core.backend.numpy.rnn import rnn diff --git a/keras_core/backend/tensorflow/__init__.py b/keras_core/backend/tensorflow/__init__.py deleted file mode 100644 index f70cfdb43..000000000 --- a/keras_core/backend/tensorflow/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from keras_core.backend.tensorflow import core -from keras_core.backend.tensorflow import image -from keras_core.backend.tensorflow import math -from keras_core.backend.tensorflow import nn -from keras_core.backend.tensorflow import numpy -from keras_core.backend.tensorflow import random -from keras_core.backend.tensorflow import tensorboard -from keras_core.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS -from keras_core.backend.tensorflow.core import Variable -from keras_core.backend.tensorflow.core import cast -from keras_core.backend.tensorflow.core import compute_output_spec -from keras_core.backend.tensorflow.core import cond -from keras_core.backend.tensorflow.core import convert_to_numpy -from keras_core.backend.tensorflow.core import convert_to_tensor -from keras_core.backend.tensorflow.core import is_tensor -from keras_core.backend.tensorflow.core import name_scope -from keras_core.backend.tensorflow.core import scatter -from keras_core.backend.tensorflow.core import shape -from keras_core.backend.tensorflow.core import stop_gradient -from keras_core.backend.tensorflow.core import vectorized_map -from keras_core.backend.tensorflow.rnn import cudnn_ok -from keras_core.backend.tensorflow.rnn import gru -from keras_core.backend.tensorflow.rnn import lstm -from keras_core.backend.tensorflow.rnn import rnn diff --git a/keras_core/backend/torch/__init__.py b/keras_core/backend/torch/__init__.py deleted file mode 100644 index 2be9709a8..000000000 --- a/keras_core/backend/torch/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Torch backend APIs. - -# Note on device placement - -Torch has a different device placement style compared to TF and JAX. 
-In short, variables/tensors are not created on GPU by default, -and the GPU cannot directly communicate with the CPU. -To bring Torch behavior in line with TF and JAX automated device placement, -we are doing the following to automate device placement if a GPU is available: - -- Variables are created on GPU. -- Input data will be placed on GPU at the first `keras_core.layers.Layer` call. -- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU. -- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy. -""" - -from keras_core.backend.torch import core -from keras_core.backend.torch import image -from keras_core.backend.torch import math -from keras_core.backend.torch import nn -from keras_core.backend.torch import numpy -from keras_core.backend.torch import random -from keras_core.backend.torch.core import SUPPORTS_SPARSE_TENSORS -from keras_core.backend.torch.core import Variable -from keras_core.backend.torch.core import cast -from keras_core.backend.torch.core import compute_output_spec -from keras_core.backend.torch.core import cond -from keras_core.backend.torch.core import convert_to_numpy -from keras_core.backend.torch.core import convert_to_tensor -from keras_core.backend.torch.core import is_tensor -from keras_core.backend.torch.core import scatter -from keras_core.backend.torch.core import shape -from keras_core.backend.torch.core import stop_gradient -from keras_core.backend.torch.core import to_torch_dtype -from keras_core.backend.torch.core import vectorized_map -from keras_core.backend.torch.rnn import cudnn_ok -from keras_core.backend.torch.rnn import gru -from keras_core.backend.torch.rnn import lstm -from keras_core.backend.torch.rnn import rnn diff --git a/keras_core/backend/torch/optimizers/__init__.py b/keras_core/backend/torch/optimizers/__init__.py deleted file mode 100644 index 1b7d9c306..000000000 --- a/keras_core/backend/torch/optimizers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from 
keras_core.backend.torch.optimizers.torch_optimizer import TorchOptimizer diff --git a/keras_core/backend/torch/optimizers/torch_adamw.py b/keras_core/backend/torch/optimizers/torch_adamw.py deleted file mode 100644 index 9f752a505..000000000 --- a/keras_core/backend/torch/optimizers/torch_adamw.py +++ /dev/null @@ -1,6 +0,0 @@ -from keras_core import optimizers -from keras_core.backend.torch.optimizers import torch_adam - - -class AdamW(torch_adam.Adam, optimizers.AdamW): - pass diff --git a/keras_core/callbacks/__init__.py b/keras_core/callbacks/__init__.py deleted file mode 100644 index a13cbc927..000000000 --- a/keras_core/callbacks/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from keras_core.callbacks.backup_and_restore_callback import BackupAndRestore -from keras_core.callbacks.callback import Callback -from keras_core.callbacks.callback_list import CallbackList -from keras_core.callbacks.csv_logger import CSVLogger -from keras_core.callbacks.early_stopping import EarlyStopping -from keras_core.callbacks.history import History -from keras_core.callbacks.lambda_callback import LambdaCallback -from keras_core.callbacks.learning_rate_scheduler import LearningRateScheduler -from keras_core.callbacks.model_checkpoint import ModelCheckpoint -from keras_core.callbacks.progbar_logger import ProgbarLogger -from keras_core.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau -from keras_core.callbacks.remote_monitor import RemoteMonitor -from keras_core.callbacks.tensorboard import TensorBoard -from keras_core.callbacks.terminate_on_nan import TerminateOnNaN diff --git a/keras_core/datasets/__init__.py b/keras_core/datasets/__init__.py deleted file mode 100644 index 4fde97f1e..000000000 --- a/keras_core/datasets/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Small NumPy datasets for debugging/testing.""" - -from keras_core.datasets import boston_housing -from keras_core.datasets import california_housing -from keras_core.datasets import cifar10 -from 
keras_core.datasets import cifar100 -from keras_core.datasets import fashion_mnist -from keras_core.datasets import imdb -from keras_core.datasets import mnist -from keras_core.datasets import reuters diff --git a/keras_core/export/__init__.py b/keras_core/export/__init__.py deleted file mode 100644 index 2d34bf5fc..000000000 --- a/keras_core/export/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from keras_core.export.export_lib import ExportArchive diff --git a/keras_core/layers/__init__.py b/keras_core/layers/__init__.py deleted file mode 100644 index 2f549f4d2..000000000 --- a/keras_core/layers/__init__.py +++ /dev/null @@ -1,170 +0,0 @@ -from keras_core.api_export import keras_core_export -from keras_core.layers.activations.activation import Activation -from keras_core.layers.activations.elu import ELU -from keras_core.layers.activations.leaky_relu import LeakyReLU -from keras_core.layers.activations.prelu import PReLU -from keras_core.layers.activations.relu import ReLU -from keras_core.layers.activations.softmax import Softmax -from keras_core.layers.attention.additive_attention import AdditiveAttention -from keras_core.layers.attention.attention import Attention -from keras_core.layers.attention.multi_head_attention import MultiHeadAttention -from keras_core.layers.convolutional.conv1d import Conv1D -from keras_core.layers.convolutional.conv1d_transpose import Conv1DTranspose -from keras_core.layers.convolutional.conv2d import Conv2D -from keras_core.layers.convolutional.conv2d_transpose import Conv2DTranspose -from keras_core.layers.convolutional.conv3d import Conv3D -from keras_core.layers.convolutional.conv3d_transpose import Conv3DTranspose -from keras_core.layers.convolutional.depthwise_conv1d import DepthwiseConv1D -from keras_core.layers.convolutional.depthwise_conv2d import DepthwiseConv2D -from keras_core.layers.convolutional.separable_conv1d import SeparableConv1D -from keras_core.layers.convolutional.separable_conv2d import SeparableConv2D -from 
keras_core.layers.core.dense import Dense -from keras_core.layers.core.einsum_dense import EinsumDense -from keras_core.layers.core.embedding import Embedding -from keras_core.layers.core.identity import Identity -from keras_core.layers.core.input_layer import Input -from keras_core.layers.core.input_layer import InputLayer -from keras_core.layers.core.lambda_layer import Lambda -from keras_core.layers.core.masking import Masking -from keras_core.layers.core.wrapper import Wrapper -from keras_core.layers.layer import Layer -from keras_core.layers.merging.add import Add -from keras_core.layers.merging.add import add -from keras_core.layers.merging.average import Average -from keras_core.layers.merging.average import average -from keras_core.layers.merging.concatenate import Concatenate -from keras_core.layers.merging.concatenate import concatenate -from keras_core.layers.merging.dot import Dot -from keras_core.layers.merging.dot import dot -from keras_core.layers.merging.maximum import Maximum -from keras_core.layers.merging.maximum import maximum -from keras_core.layers.merging.minimum import Minimum -from keras_core.layers.merging.minimum import minimum -from keras_core.layers.merging.multiply import Multiply -from keras_core.layers.merging.multiply import multiply -from keras_core.layers.merging.subtract import Subtract -from keras_core.layers.merging.subtract import subtract -from keras_core.layers.normalization.batch_normalization import ( - BatchNormalization, -) -from keras_core.layers.normalization.group_normalization import ( - GroupNormalization, -) -from keras_core.layers.normalization.layer_normalization import ( - LayerNormalization, -) -from keras_core.layers.normalization.spectral_normalization import ( - SpectralNormalization, -) -from keras_core.layers.normalization.unit_normalization import UnitNormalization -from keras_core.layers.pooling.average_pooling1d import AveragePooling1D -from keras_core.layers.pooling.average_pooling2d import 
AveragePooling2D -from keras_core.layers.pooling.average_pooling3d import AveragePooling3D -from keras_core.layers.pooling.global_average_pooling1d import ( - GlobalAveragePooling1D, -) -from keras_core.layers.pooling.global_average_pooling2d import ( - GlobalAveragePooling2D, -) -from keras_core.layers.pooling.global_average_pooling3d import ( - GlobalAveragePooling3D, -) -from keras_core.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D -from keras_core.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D -from keras_core.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D -from keras_core.layers.pooling.max_pooling1d import MaxPooling1D -from keras_core.layers.pooling.max_pooling2d import MaxPooling2D -from keras_core.layers.pooling.max_pooling3d import MaxPooling3D -from keras_core.layers.preprocessing.category_encoding import CategoryEncoding -from keras_core.layers.preprocessing.center_crop import CenterCrop -from keras_core.layers.preprocessing.discretization import Discretization -from keras_core.layers.preprocessing.hashed_crossing import HashedCrossing -from keras_core.layers.preprocessing.hashing import Hashing -from keras_core.layers.preprocessing.index_lookup import IndexLookup -from keras_core.layers.preprocessing.integer_lookup import IntegerLookup -from keras_core.layers.preprocessing.normalization import Normalization -from keras_core.layers.preprocessing.random_brightness import RandomBrightness -from keras_core.layers.preprocessing.random_contrast import RandomContrast -from keras_core.layers.preprocessing.random_crop import RandomCrop -from keras_core.layers.preprocessing.random_flip import RandomFlip -from keras_core.layers.preprocessing.random_rotation import RandomRotation -from keras_core.layers.preprocessing.random_translation import RandomTranslation -from keras_core.layers.preprocessing.random_zoom import RandomZoom -from keras_core.layers.preprocessing.rescaling import Rescaling -from 
keras_core.layers.preprocessing.resizing import Resizing -from keras_core.layers.preprocessing.string_lookup import StringLookup -from keras_core.layers.preprocessing.text_vectorization import TextVectorization -from keras_core.layers.regularization.activity_regularization import ( - ActivityRegularization, -) -from keras_core.layers.regularization.dropout import Dropout -from keras_core.layers.regularization.gaussian_dropout import GaussianDropout -from keras_core.layers.regularization.gaussian_noise import GaussianNoise -from keras_core.layers.regularization.spatial_dropout import SpatialDropout1D -from keras_core.layers.regularization.spatial_dropout import SpatialDropout2D -from keras_core.layers.regularization.spatial_dropout import SpatialDropout3D -from keras_core.layers.reshaping.cropping1d import Cropping1D -from keras_core.layers.reshaping.cropping2d import Cropping2D -from keras_core.layers.reshaping.cropping3d import Cropping3D -from keras_core.layers.reshaping.flatten import Flatten -from keras_core.layers.reshaping.permute import Permute -from keras_core.layers.reshaping.repeat_vector import RepeatVector -from keras_core.layers.reshaping.reshape import Reshape -from keras_core.layers.reshaping.up_sampling1d import UpSampling1D -from keras_core.layers.reshaping.up_sampling2d import UpSampling2D -from keras_core.layers.reshaping.up_sampling3d import UpSampling3D -from keras_core.layers.reshaping.zero_padding1d import ZeroPadding1D -from keras_core.layers.reshaping.zero_padding2d import ZeroPadding2D -from keras_core.layers.reshaping.zero_padding3d import ZeroPadding3D -from keras_core.layers.rnn.bidirectional import Bidirectional -from keras_core.layers.rnn.conv_lstm1d import ConvLSTM1D -from keras_core.layers.rnn.conv_lstm2d import ConvLSTM2D -from keras_core.layers.rnn.conv_lstm3d import ConvLSTM3D -from keras_core.layers.rnn.gru import GRU -from keras_core.layers.rnn.gru import GRUCell -from keras_core.layers.rnn.lstm import LSTM -from 
keras_core.layers.rnn.lstm import LSTMCell -from keras_core.layers.rnn.rnn import RNN -from keras_core.layers.rnn.simple_rnn import SimpleRNN -from keras_core.layers.rnn.simple_rnn import SimpleRNNCell -from keras_core.layers.rnn.stacked_rnn_cells import StackedRNNCells -from keras_core.layers.rnn.time_distributed import TimeDistributed -from keras_core.saving import serialization_lib - - -@keras_core_export("keras_core.layers.serialize") -def serialize(layer): - """Returns the layer configuration as a Python dict. - - Args: - layer: A `keras.layers.Layer` instance to serialize. - - Returns: - Python dict which contains the configuration of the layer. - """ - return serialization_lib.serialize_keras_object(layer) - - -@keras_core_export("keras_core.layers.deserialize") -def deserialize(config, custom_objects=None): - """Returns a Keras layer object via its configuration. - - Args: - config: A python dict containing a serialized layer configuration. - custom_objects: Optional dictionary mapping names (strings) to custom - objects (classes and functions) to be considered during - deserialization. - - Returns: - A Keras layer instance. - """ - obj = serialization_lib.deserialize_keras_object( - config, - custom_objects=custom_objects, - ) - if not isinstance(obj, Layer): - raise ValueError( - "`keras.layers.deserialize` was passed a `config` object that is " - f"not a `keras.layers.Layer`. 
Received: {config}" - ) - return obj diff --git a/keras_core/layers/activations/__init__.py b/keras_core/layers/activations/__init__.py deleted file mode 100644 index 70baaef7f..000000000 --- a/keras_core/layers/activations/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from keras_core.layers.activations.elu import ELU -from keras_core.layers.activations.leaky_relu import LeakyReLU -from keras_core.layers.activations.prelu import PReLU -from keras_core.layers.activations.relu import ReLU -from keras_core.layers.activations.softmax import Softmax diff --git a/keras_core/layers/merging/add.py b/keras_core/layers/merging/add.py deleted file mode 100644 index f44e36516..000000000 --- a/keras_core/layers/merging/add.py +++ /dev/null @@ -1,69 +0,0 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge - - -@keras_core_export("keras_core.layers.Add") -class Add(Merge): - """Performs elementwise addition operation. - - It takes as input a list of tensors, all of the same shape, - and returns a single tensor (also of the same shape). 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Add()([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `added = keras_core.layers.add([x1, x2])` - >>> added = keras_core.layers.Add()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(added) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - - def _merge_function(self, inputs): - output = inputs[0] - for i in range(1, len(inputs)): - output = ops.add(output, inputs[i]) - return output - - -@keras_core_export("keras_core.layers.add") -def add(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Add` layer. - - Args: - inputs: A list of input tensors with the same shape. - **kwargs: Standard layer keyword arguments. - - Returns: - A tensor as the sum of the inputs. It has the same shape as the inputs. 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.add([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> added = keras_core.layers.add([x1, x2]) - >>> out = keras_core.layers.Dense(4)(added) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - return Add(**kwargs)(inputs) diff --git a/keras_core/layers/merging/average.py b/keras_core/layers/merging/average.py deleted file mode 100644 index 7a92b8076..000000000 --- a/keras_core/layers/merging/average.py +++ /dev/null @@ -1,70 +0,0 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge - - -@keras_core_export("keras_core.layers.Average") -class Average(Merge): - """Averages a list of inputs element-wise.. - - It takes as input a list of tensors, all of the same shape, - and returns a single tensor (also of the same shape). 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Average()([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `y = keras_core.layers.average([x1, x2])` - >>> y = keras_core.layers.Average()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - - def _merge_function(self, inputs): - output = inputs[0] - for i in range(1, len(inputs)): - output = ops.add(output, inputs[i]) - return output / len(inputs) - - -@keras_core_export("keras_core.layers.average") -def average(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Average` layer. - - Args: - inputs: A list of input tensors , all of the same shape. - **kwargs: Standard layer keyword arguments. - - Returns: - A tensor as the element-wise product of the inputs with the same - shape as the inputs. 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.average([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> y = keras_core.layers.average([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - return Average(**kwargs)(inputs) diff --git a/keras_core/layers/merging/maximum.py b/keras_core/layers/merging/maximum.py deleted file mode 100644 index 6d8fe6565..000000000 --- a/keras_core/layers/merging/maximum.py +++ /dev/null @@ -1,70 +0,0 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge - - -@keras_core_export("keras_core.layers.Maximum") -class Maximum(Merge): - """Computes element-wise maximum on a list of inputs. - - It takes as input a list of tensors, all of the same shape, - and returns a single tensor (also of the same shape). 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Maximum()([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `y = keras_core.layers.maximum([x1, x2])` - >>> y = keras_core.layers.Maximum()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - - def _merge_function(self, inputs): - output = inputs[0] - for i in range(1, len(inputs)): - output = ops.maximum(output, inputs[i]) - return output - - -@keras_core_export("keras_core.layers.maximum") -def maximum(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Maximum` layer. - - Args: - inputs: A list of input tensors , all of the same shape. - **kwargs: Standard layer keyword arguments. - - Returns: - A tensor as the element-wise product of the inputs with the same - shape as the inputs. 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.maximum([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> y = keras_core.layers.maximum([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - return Maximum(**kwargs)(inputs) diff --git a/keras_core/layers/merging/minimum.py b/keras_core/layers/merging/minimum.py deleted file mode 100644 index 28e8c14f2..000000000 --- a/keras_core/layers/merging/minimum.py +++ /dev/null @@ -1,70 +0,0 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge - - -@keras_core_export("keras_core.layers.Minimum") -class Minimum(Merge): - """Computes elementwise minimum on a list of inputs. - - It takes as input a list of tensors, all of the same shape, - and returns a single tensor (also of the same shape). 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Minimum()([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `y = keras_core.layers.minimum([x1, x2])` - >>> y = keras_core.layers.Minimum()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - - def _merge_function(self, inputs): - output = inputs[0] - for i in range(1, len(inputs)): - output = ops.minimum(output, inputs[i]) - return output - - -@keras_core_export("keras_core.layers.minimum") -def minimum(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Minimum` layer. - - Args: - inputs: A list of input tensors , all of the same shape. - **kwargs: Standard layer keyword arguments. - - Returns: - A tensor as the elementwise product of the inputs with the same - shape as the inputs. 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.minimum([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> y = keras_core.layers.minimum([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - return Minimum(**kwargs)(inputs) diff --git a/keras_core/layers/merging/multiply.py b/keras_core/layers/merging/multiply.py deleted file mode 100644 index c14325f47..000000000 --- a/keras_core/layers/merging/multiply.py +++ /dev/null @@ -1,70 +0,0 @@ -from keras_core import ops -from keras_core.api_export import keras_core_export -from keras_core.layers.merging.base_merge import Merge - - -@keras_core_export("keras_core.layers.Multiply") -class Multiply(Merge): - """Performs elementwise multiplication. - - It takes as input a list of tensors, all of the same shape, - and returns a single tensor (also of the same shape). 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.Multiply()([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> # equivalent to `y = keras_core.layers.multiply([x1, x2])` - >>> y = keras_core.layers.Multiply()([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - - def _merge_function(self, inputs): - output = inputs[0] - for i in range(1, len(inputs)): - output = ops.multiply(output, inputs[i]) - return output - - -@keras_core_export("keras_core.layers.multiply") -def multiply(inputs, **kwargs): - """Functional interface to the `keras_core.layers.Multiply` layer. - - Args: - inputs: A list of input tensors , all of the same shape. - **kwargs: Standard layer keyword arguments. - - Returns: - A tensor as the elementwise product of the inputs with the same - shape as the inputs. 
- - Examples: - - >>> input_shape = (2, 3, 4) - >>> x1 = np.random.rand(*input_shape) - >>> x2 = np.random.rand(*input_shape) - >>> y = keras_core.layers.multiply([x1, x2]) - - Usage in a Keras model: - - >>> input1 = keras_core.layers.Input(shape=(16,)) - >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1) - >>> input2 = keras_core.layers.Input(shape=(32,)) - >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2) - >>> y = keras_core.layers.multiply([x1, x2]) - >>> out = keras_core.layers.Dense(4)(y) - >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out) - - """ - return Multiply(**kwargs)(inputs) diff --git a/keras_core/models/__init__.py b/keras_core/models/__init__.py deleted file mode 100644 index ccfcc44c9..000000000 --- a/keras_core/models/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from keras_core.models.functional import Functional -from keras_core.models.model import Model -from keras_core.models.sequential import Sequential diff --git a/keras_core/ops/__init__.py b/keras_core/ops/__init__.py deleted file mode 100644 index 0ddf9c7d6..000000000 --- a/keras_core/ops/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# from keras_core.ops.numpy import Matmul, matmul -# from keras_core.ops.numpy import Add, add -# from keras_core.ops.numpy import Multiply, multiply - -from keras_core.backend import cast -from keras_core.backend import cond -from keras_core.backend import is_tensor -from keras_core.backend import name_scope -from keras_core.backend import random -from keras_core.ops import image -from keras_core.ops import operation_utils -from keras_core.ops.core import * # noqa: F403 -from keras_core.ops.math import * # noqa: F403 -from keras_core.ops.nn import * # noqa: F403 -from keras_core.ops.numpy import * # noqa: F403 diff --git a/keras_core/optimizers/schedules/__init__.py b/keras_core/optimizers/schedules/__init__.py deleted file mode 100644 index 30e63c9ed..000000000 --- a/keras_core/optimizers/schedules/__init__.py 
+++ /dev/null @@ -1,16 +0,0 @@ -from keras_core.optimizers.schedules.learning_rate_schedule import CosineDecay -from keras_core.optimizers.schedules.learning_rate_schedule import ( - CosineDecayRestarts, -) -from keras_core.optimizers.schedules.learning_rate_schedule import ( - ExponentialDecay, -) -from keras_core.optimizers.schedules.learning_rate_schedule import ( - InverseTimeDecay, -) -from keras_core.optimizers.schedules.learning_rate_schedule import ( - PiecewiseConstantDecay, -) -from keras_core.optimizers.schedules.learning_rate_schedule import ( - PolynomialDecay, -) diff --git a/keras_core/saving/__init__.py b/keras_core/saving/__init__.py deleted file mode 100644 index c59b02a37..000000000 --- a/keras_core/saving/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from keras_core.saving.object_registration import CustomObjectScope -from keras_core.saving.object_registration import custom_object_scope -from keras_core.saving.object_registration import get_custom_objects -from keras_core.saving.object_registration import get_registered_name -from keras_core.saving.object_registration import get_registered_object -from keras_core.saving.object_registration import register_keras_serializable -from keras_core.saving.saving_api import load_model -from keras_core.saving.serialization_lib import deserialize_keras_object -from keras_core.saving.serialization_lib import serialize_keras_object diff --git a/keras_core/testing/__init__.py b/keras_core/testing/__init__.py deleted file mode 100644 index 3da19e69e..000000000 --- a/keras_core/testing/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from keras_core.testing.test_case import TestCase diff --git a/keras_core/utils/__init__.py b/keras_core/utils/__init__.py deleted file mode 100644 index c1ca205f0..000000000 --- a/keras_core/utils/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from keras_core.utils.audio_dataset_utils import audio_dataset_from_directory -from keras_core.utils.dataset_utils import split_dataset -from 
keras_core.utils.file_utils import get_file -from keras_core.utils.image_dataset_utils import image_dataset_from_directory -from keras_core.utils.image_utils import array_to_img -from keras_core.utils.image_utils import img_to_array -from keras_core.utils.image_utils import load_img -from keras_core.utils.image_utils import save_img -from keras_core.utils.io_utils import disable_interactive_logging -from keras_core.utils.io_utils import enable_interactive_logging -from keras_core.utils.io_utils import is_interactive_logging_enabled -from keras_core.utils.model_visualization import model_to_dot -from keras_core.utils.model_visualization import plot_model -from keras_core.utils.numerical_utils import normalize -from keras_core.utils.numerical_utils import to_categorical -from keras_core.utils.progbar import Progbar -from keras_core.utils.python_utils import default -from keras_core.utils.python_utils import is_default -from keras_core.utils.python_utils import removeprefix -from keras_core.utils.python_utils import removesuffix -from keras_core.utils.rng_utils import set_random_seed -from keras_core.utils.sequence_utils import pad_sequences -from keras_core.utils.text_dataset_utils import text_dataset_from_directory -from keras_core.utils.timeseries_dataset_utils import ( - timeseries_dataset_from_array, -) diff --git a/pip_build.py b/pip_build.py index ff229be5b..06c3271e2 100644 --- a/pip_build.py +++ b/pip_build.py @@ -1,4 +1,4 @@ -"""Script to create (and optionally install) a `.whl` archive for Keras Core. +"""Script to create (and optionally install) a `.whl` archive for Keras 3. 
Usage: @@ -25,7 +25,7 @@ import namex # Needed because importing torch after TF causes the runtime to crash import torch # noqa: F401 -package = "keras_core" +package = "keras" build_directory = "tmp_build_dir" dist_directory = "dist" to_copy = ["setup.py", "README.md"] @@ -36,7 +36,7 @@ def ignore_files(_, filenames): def copy_source_to_build_directory(root_path): - # Copy sources (`keras_core/` directory and setup files) to build + # Copy sources (`keras/` directory and setup files) to build # directory os.chdir(root_path) os.mkdir(build_directory) @@ -49,22 +49,22 @@ def copy_source_to_build_directory(root_path): def run_namex_conversion(): - # Restructure the codebase so that source files live in `keras_core/src` + # Restructure the codebase so that source files live in `keras/src` namex.convert_codebase(package, code_directory="src") - # Generate API __init__.py files in `keras_core/` + # Generate API __init__.py files in `keras/` namex.generate_api_files(package, code_directory="src", verbose=True) def create_legacy_directory(): - # Make keras_core/_tf_keras/ by copying keras_core/ + # Make keras/_tf_keras/ by copying keras/ tf_keras_dirpath = os.path.join(package, "_tf_keras") os.makedirs(tf_keras_dirpath) with open(os.path.join(package, "__init__.py")) as f: init_file = f.read() init_file = init_file.replace( - "from keras_core import _legacy", - "from keras_core import _tf_keras", + "from keras import _legacy", + "from keras import _tf_keras", ) with open(os.path.join(package, "__init__.py"), "w") as f: f.write(init_file) @@ -83,7 +83,7 @@ def create_legacy_directory(): ignore=ignore_files, ) - # Copy keras_core/_legacy/ file contents to keras_core/_tf_keras/ + # Copy keras/_legacy/ file contents to keras/_tf_keras/ legacy_submodules = [ path[:-3] for path in os.listdir(os.path.join(package, "src", "legacy")) @@ -109,28 +109,28 @@ def create_legacy_directory(): with open(legacy_fpath) as f: legacy_contents = f.read() legacy_contents = 
legacy_contents.replace( - "keras_core._legacy", "keras_core._tf_keras" + "keras._legacy", "keras._tf_keras" ) if os.path.exists(core_api_fpath): with open(core_api_fpath) as f: core_api_contents = f.read() core_api_contents = core_api_contents.replace( - "from keras_core import _tf_keras\n", "" + "from keras import _tf_keras\n", "" ) for legacy_submodule in legacy_submodules: core_api_contents = core_api_contents.replace( - f"from keras_core import {legacy_submodule}\n", + f"from keras import {legacy_submodule}\n", "", ) core_api_contents = core_api_contents.replace( - f"keras_core.{legacy_submodule}", - f"keras_core._tf_keras.{legacy_submodule}", + f"keras.{legacy_submodule}", + f"keras._tf_keras.{legacy_submodule}", ) legacy_contents = core_api_contents + "\n" + legacy_contents with open(tf_keras_fpath, "w") as f: f.write(legacy_contents) - # Delete keras_core/_legacy/ + # Delete keras/_legacy/ shutil.rmtree(os.path.join(package, "_legacy")) @@ -172,7 +172,7 @@ def build(root_path): copy_source_to_build_directory(root_path) run_namex_conversion() create_legacy_directory() - from keras_core.src.version import __version__ # noqa: E402 + from keras.src.version import __version__ # noqa: E402 export_version_string(__version__) return build_and_save_output(root_path, __version__) diff --git a/setup.py b/setup.py index db81b01f4..cf7b1e5bb 100644 --- a/setup.py +++ b/setup.py @@ -23,18 +23,18 @@ def get_version(rel_path): HERE = pathlib.Path(__file__).parent README = (HERE / "README.md").read_text() -if os.path.exists("keras_core/version.py"): - VERSION = get_version("keras_core/version.py") +if os.path.exists("keras/version.py"): + VERSION = get_version("keras/version.py") else: - VERSION = get_version("keras_core/__init__.py") + VERSION = get_version("keras/__init__.py") setup( - name="keras-core", + name="keras", description="Multi-backend Keras.", long_description_content_type="text/markdown", long_description=README, version=VERSION, - 
url="https://github.com/keras-team/keras-core", + url="https://github.com/keras-team/keras", author="Keras team", author_email="keras-users@googlegroups.com", license="Apache License 2.0",