diff --git a/keras_core/applications/applications_test.py b/keras_core/applications/applications_test.py index c083ee660..b53f20a7a 100644 --- a/keras_core/applications/applications_test.py +++ b/keras_core/applications/applications_test.py @@ -4,15 +4,19 @@ from absl.testing import parameterized from keras_core import backend from keras_core import testing +from keras_core.applications import convnext from keras_core.applications import densenet from keras_core.applications import efficientnet from keras_core.applications import efficientnet_v2 +from keras_core.applications import inception_resnet_v2 +from keras_core.applications import inception_v3 from keras_core.applications import mobilenet from keras_core.applications import mobilenet_v2 from keras_core.applications import mobilenet_v3 from keras_core.applications import vgg16 from keras_core.applications import vgg19 from keras_core.applications import xception +from keras_core.saving import serialization_lib from keras_core.utils import file_utils from keras_core.utils import image_utils @@ -27,7 +31,10 @@ MODEL_LIST = [ (vgg19.VGG19, 512, vgg19), # xception (xception.Xception, 2048, xception), - # mobilnet + # inception + (inception_v3.InceptionV3, 2048, inception_v3), + (inception_resnet_v2.InceptionResNetV2, 1536, inception_resnet_v2), + # mobilenet (mobilenet.MobileNet, 1024, mobilenet), (mobilenet_v2.MobileNetV2, 1280, mobilenet_v2), (mobilenet_v3.MobileNetV3Small, 576, mobilenet_v3), @@ -52,6 +59,12 @@ MODEL_LIST = [ (densenet.DenseNet121, 1024, densenet), (densenet.DenseNet169, 1664, densenet), (densenet.DenseNet201, 1920, densenet), + # convnext + (convnext.ConvNeXtTiny, 768, convnext), + (convnext.ConvNeXtSmall, 768, convnext), + (convnext.ConvNeXtBase, 1024, convnext), + (convnext.ConvNeXtLarge, 1536, convnext), + (convnext.ConvNeXtXLarge, 2048, convnext), ] # Add names for `named_parameters`. 
MODEL_LIST = [(e[0].__name__, *e) for e in MODEL_LIST] @@ -111,8 +124,8 @@ class ApplicationsTest(testing.TestCase, parameterized.TestCase): self.assertIn("African_elephant", names[:3]) # Can be serialized and deserialized - config = model.get_config() - reconstructed_model = model.__class__.from_config(config) + config = serialization_lib.serialize_keras_object(model) + reconstructed_model = serialization_lib.deserialize_keras_object(config) self.assertEqual(len(model.weights), len(reconstructed_model.weights)) @parameterized.named_parameters(MODEL_LIST) diff --git a/keras_core/applications/convnext.py b/keras_core/applications/convnext.py new file mode 100644 index 000000000..fd388af2b --- /dev/null +++ b/keras_core/applications/convnext.py @@ -0,0 +1,752 @@ +import numpy as np +from tensorflow.io import gfile + +from keras_core import backend +from keras_core import initializers +from keras_core import layers +from keras_core import operations as ops +from keras_core import random +from keras_core.api_export import keras_core_export +from keras_core.applications import imagenet_utils +from keras_core.layers.layer import Layer +from keras_core.models import Functional +from keras_core.models import Sequential +from keras_core.operations import operation_utils +from keras_core.utils import file_utils + +BASE_WEIGHTS_PATH = ( + "https://storage.googleapis.com/tensorflow/keras-applications/convnext/" +) + +WEIGHTS_HASHES = { + "convnext_tiny": ( + "8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff", + "d547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1", + ), + "convnext_small": ( + "ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c", + "6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab", + ), + "convnext_base": ( + "52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6", + "40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45", + ), + "convnext_large": ( + "070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6", + "96f02b6f0753d4f543261bc9d09bed650f24dd6bc02ddde3066135b63d23a1cd", + ), + "convnext_xlarge": ( + "c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee", + "de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05", + ), +} + + +MODEL_CONFIGS = { + "tiny": { + "depths": [3, 3, 9, 3], + "projection_dims": [96, 192, 384, 768], + "default_size": 224, + }, + "small": { + "depths": [3, 3, 27, 3], + "projection_dims": [96, 192, 384, 768], + "default_size": 224, + }, + "base": { + "depths": [3, 3, 27, 3], + "projection_dims": [128, 256, 512, 1024], + "default_size": 224, + }, + "large": { + "depths": [3, 3, 27, 3], + "projection_dims": [192, 384, 768, 1536], + "default_size": 224, + }, + "xlarge": { + "depths": [3, 3, 27, 3], + "projection_dims": [256, 512, 1024, 2048], + "default_size": 224, + }, +} + +BASE_DOCSTRING = """Instantiates the {name} architecture. + +References: +- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) +(CVPR 2022) + +For image classification use cases, see +[this page for detailed examples]( +https://keras.io/api/applications/#usage-examples-for-image-classification-models). +For transfer learning use cases, make sure to read the +[guide to transfer learning & fine-tuning]( +https://keras.io/guides/transfer_learning/). + +The `base`, `large`, and `xlarge` models were first pre-trained on the +ImageNet-21k dataset and then fine-tuned on the ImageNet-1k dataset. 
The +pre-trained parameters of the models were assembled from the +[official repository](https://github.com/facebookresearch/ConvNeXt). To get a +sense of how these parameters were converted to Keras compatible parameters, +please refer to +[this repository](https://github.com/sayakpaul/keras-convnext-conversion). + +Note: Each Keras Application expects a specific kind of input preprocessing. +For ConvNeXt, preprocessing is included in the model using a `Normalization` +layer. ConvNeXt models expect their inputs to be float or uint8 tensors of +pixels with values in the [0-255] range. + +When calling the `summary()` method after instantiating a ConvNeXt model, +prefer setting the `expand_nested` argument of `summary()` to `True` to better +investigate the instantiated model. + +Args: + include_top: Whether to include the fully-connected + layer at the top of the network. Defaults to `True`. + weights: One of `None` (random initialization), + `"imagenet"` (pre-training on ImageNet-1k), or the path to the weights + file to be loaded. Defaults to `"imagenet"`. + input_tensor: Optional Keras tensor + (i.e. output of `layers.Input()`) + to use as image input for the model. + input_shape: Optional shape tuple, only to be specified + if `include_top` is `False`. + It should have exactly 3 input channels. + pooling: Optional pooling mode for feature extraction + when `include_top` is `False`. Defaults to None. + - `None` means that the output of the model will be + the 4D tensor output of the last convolutional layer. + - `avg` means that global average pooling + will be applied to the output of the + last convolutional layer, and thus + the output of the model will be a 2D tensor. + - `max` means that global max pooling will + be applied. + classes: Optional number of classes to classify images + into, only to be specified if `include_top` is `True`, and + if no `weights` argument is specified. Defaults to 1000 (number of + ImageNet classes). + classifier_activation: A `str` or callable. The activation function to use + on the "top" layer. Ignored unless `include_top=True`. Set + `classifier_activation=None` to return the logits of the "top" layer. + Defaults to `"softmax"`. + When loading pretrained weights, `classifier_activation` can only + be `None` or `"softmax"`. + +Returns: + A model instance. +""" + + +class StochasticDepth(Layer): + """Stochastic Depth module. + + It performs batch-wise dropping rather than sample-wise. In libraries like + `timm`, it's similar to `DropPath` layers that drop residual paths + sample-wise. + + References: + - https://github.com/rwightman/pytorch-image-models + + Args: + drop_path_rate (float): Probability of dropping paths. Should be within + [0, 1]. + + Returns: + Tensor either with the residual path dropped or kept. + """ + + def __init__(self, drop_path_rate, **kwargs): + super().__init__(**kwargs) + self.drop_path_rate = drop_path_rate + + def call(self, x, training=None): + if training: + keep_prob = 1 - self.drop_path_rate + shape = (ops.shape(x)[0],) + (1,) * (len(ops.shape(x)) - 1) + random_tensor = keep_prob + random.uniform(shape, 0, 1) + random_tensor = ops.floor(random_tensor) + return (x / keep_prob) * random_tensor + return x + + def get_config(self): + config = super().get_config() + config.update({"drop_path_rate": self.drop_path_rate}) + return config + + +class LayerScale(Layer): + """Layer scale module. + + References: + + - https://arxiv.org/abs/2103.17239 + + Args: + init_values (float): Initial value for layer scale.
Should be within + [0, 1]. + projection_dim (int): Projection dimensionality. + + Returns: + Tensor multiplied to the scale. + """ + + def __init__(self, init_values, projection_dim, **kwargs): + super().__init__(**kwargs) + self.init_values = init_values + self.projection_dim = projection_dim + + def build(self, _): + self.gamma = self.add_weight( + shape=(self.projection_dim,), + initializer=initializers.Constant(self.init_values), + trainable=True, + ) + + def call(self, x): + return x * self.gamma + + def get_config(self): + config = super().get_config() + config.update( + { + "init_values": self.init_values, + "projection_dim": self.projection_dim, + } + ) + return config + + +def ConvNeXtBlock( + projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None +): + """ConvNeXt block. + + References: + - https://arxiv.org/abs/2201.03545 + - https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py + + Notes: + In the original ConvNeXt implementation (linked above), the authors use + `Dense` layers for pointwise convolutions for increased efficiency. + Following that, this implementation also uses the same. + + Args: + projection_dim (int): Number of filters for convolution layers. In the + ConvNeXt paper, this is referred to as projection dimension. + drop_path_rate (float): Probability of dropping paths. Should be within + [0, 1]. + layer_scale_init_value (float): Layer scale value. + Should be a small float number. + name: name to path to the keras layer. + + Returns: + A function representing a ConvNeXtBlock block. + """ + if name is None: + name = "prestem" + str(backend.get_uid("prestem")) + + def apply(inputs): + x = inputs + + x = layers.Conv2D( + filters=projection_dim, + kernel_size=7, + padding="same", + groups=projection_dim, + name=name + "_depthwise_conv", + )(x) + x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x) + x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x) + x = layers.Activation("gelu", name=name + "_gelu")(x) + x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x) + + if layer_scale_init_value is not None: + x = LayerScale( + layer_scale_init_value, + projection_dim, + name=name + "_layer_scale", + )(x) + if drop_path_rate: + layer = StochasticDepth( + drop_path_rate, name=name + "_stochastic_depth" + ) + else: + layer = layers.Activation("linear", name=name + "_identity") + + return inputs + layer(x) + + return apply + + +def PreStem(name=None): + """Normalizes inputs with ImageNet-1k mean and std.""" + if name is None: + name = "prestem" + str(backend.get_uid("prestem")) + + def apply(x): + x = layers.Normalization( + mean=[0.485 * 255, 0.456 * 255, 0.406 * 255], + variance=[ + (0.229 * 255) ** 2, + (0.224 * 255) ** 2, + (0.225 * 255) ** 2, + ], + name=name + "_prestem_normalization", + )(x) + return x + + return apply + + +def Head(num_classes=1000, classifier_activation=None, name=None): + """Implementation of classification head of ConvNeXt. + + Args: + num_classes: number of classes for Dense layer + classifier_activation: activation function for the Dense layer + name: name prefix + + Returns: + Classification head function. 
+ """ + if name is None: + name = str(backend.get_uid("head")) + + def apply(x): + x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x) + x = layers.LayerNormalization( + epsilon=1e-6, name=name + "_head_layernorm" + )(x) + x = layers.Dense( + num_classes, + activation=classifier_activation, + name=name + "_head_dense", + )(x) + return x + + return apply + + +def ConvNeXt( + depths, + projection_dims, + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=224, + model_name="convnext", + include_preprocessing=True, + include_top=True, + weights=None, + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + """Instantiates ConvNeXt architecture given specific configuration. + + Args: + depths: An iterable containing depths for each individual stage. + projection_dims: An iterable containing output number of channels of + each individual stage. + drop_path_rate: Stochastic depth probability. If 0.0, then stochastic + depth won't be used. + layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling + won't be used. + default_size: Default input image size. + model_name: An optional name for the model. + include_preprocessing: boolean denoting whether to + include preprocessing in the model. + When `weights="imagenet"` this should always be `True`. + But for other models (e.g., randomly initialized) you should set it + to `False` and apply preprocessing to data accordingly. + include_top: Boolean denoting whether to include the classification + head in the model. + weights: one of `None` (random initialization), `"imagenet"` + (pre-training on ImageNet-1k), + or the path to the weights file to be loaded. + input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to + use as image input for the model. + input_shape: optional shape tuple, only to be specified if `include_top` + is `False`. It should have exactly 3 input channels. + pooling: optional pooling mode for feature extraction when `include_top` + is `False`. + - `None` means that the output of the model will be + the 4D tensor output of the last convolutional layer. + - `avg` means that global average pooling will be applied + to the output of the last convolutional layer, + and thus the output of the model will be a 2D tensor. + - `max` means that global max pooling will be applied. + classes: optional number of classes to classify images into, + only to be specified if `include_top` is `True`, + and if no `weights` argument is specified. + classifier_activation: A `str` or callable. + The activation function to use + on the "top" layer. Ignored unless `include_top=True`. + Set `classifier_activation=None` to return the logits + of the "top" layer. + + Returns: + A model instance. + """ + if not (weights in {"imagenet", None} or gfile.exists(weights)): + raise ValueError( + "The `weights` argument should be either " + "`None` (random initialization), `imagenet` " + "(pre-training on ImageNet), " + "or the path to the weights file to be loaded." + ) + + if weights == "imagenet" and include_top and classes != 1000: + raise ValueError( + 'If using `weights="imagenet"` with `include_top=True`, ' + "`classes` should be 1000. " + f"Received classes={classes}" + ) + + # Determine proper input shape.
+ input_shape = imagenet_utils.obtain_input_shape( + input_shape, + default_size=default_size, + min_size=32, + data_format=backend.image_data_format(), + require_flatten=include_top, + weights=weights, + ) + + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + else: + if not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor)[0] + else: + inputs = img_input + + x = inputs + if include_preprocessing: + channel_axis = ( + 3 if backend.image_data_format() == "channels_last" else 1 + ) + num_channels = input_shape[channel_axis - 1] + if num_channels == 3: + x = PreStem(name=model_name)(x) + + # Stem block. + stem = Sequential( + [ + layers.Conv2D( + projection_dims[0], + kernel_size=4, + strides=4, + name=model_name + "_stem_conv", + ), + layers.LayerNormalization( + epsilon=1e-6, name=model_name + "_stem_layernorm" + ), + ], + name=model_name + "_stem", + ) + + # Downsampling blocks. + downsample_layers = [] + downsample_layers.append(stem) + + num_downsample_layers = 3 + for i in range(num_downsample_layers): + downsample_layer = Sequential( + [ + layers.LayerNormalization( + epsilon=1e-6, + name=model_name + "_downsampling_layernorm_" + str(i), + ), + layers.Conv2D( + projection_dims[i + 1], + kernel_size=2, + strides=2, + name=model_name + "_downsampling_conv_" + str(i), + ), + ], + name=model_name + "_downsampling_block_" + str(i), + ) + downsample_layers.append(downsample_layer) + + # Stochastic depth schedule. + # This is referred from the original ConvNeXt codebase: + # https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86 + depth_drop_rates = [ + float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths)) + ] + + # First apply downsampling blocks and then apply ConvNeXt stages. + cur = 0 + + num_convnext_blocks = 4 + for i in range(num_convnext_blocks): + x = downsample_layers[i](x) + for j in range(depths[i]): + x = ConvNeXtBlock( + projection_dim=projection_dims[i], + drop_path_rate=depth_drop_rates[cur + j], + layer_scale_init_value=layer_scale_init_value, + name=model_name + f"_stage_{i}_block_{j}", + )(x) + cur += depths[i] + + if include_top: + imagenet_utils.validate_activation(classifier_activation, weights) + x = Head( + num_classes=classes, + classifier_activation=classifier_activation, + name=model_name, + )(x) + + else: + if pooling == "avg": + x = layers.GlobalAveragePooling2D()(x) + elif pooling == "max": + x = layers.GlobalMaxPooling2D()(x) + x = layers.LayerNormalization(epsilon=1e-6)(x) + + model = Functional(inputs=inputs, outputs=x, name=model_name) + + # Load weights. 
+ if weights == "imagenet": + if include_top: + file_suffix = ".h5" + file_hash = WEIGHTS_HASHES[model_name][0] + else: + file_suffix = "_notop.h5" + file_hash = WEIGHTS_HASHES[model_name][1] + file_name = model_name + file_suffix + weights_path = file_utils.get_file( + file_name, + BASE_WEIGHTS_PATH + file_name, + cache_subdir="models", + file_hash=file_hash, + ) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + + return model + + +## Instantiating variants ## + + +@keras_core_export( + [ + "keras_core.applications.convnext.ConvNeXtTiny", + "keras_core.applications.ConvNeXtTiny", + ] +) +def ConvNeXtTiny( + model_name="convnext_tiny", + include_top=True, + include_preprocessing=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + return ConvNeXt( + depths=MODEL_CONFIGS["tiny"]["depths"], + projection_dims=MODEL_CONFIGS["tiny"]["projection_dims"], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=MODEL_CONFIGS["tiny"]["default_size"], + model_name=model_name, + include_top=include_top, + include_preprocessing=include_preprocessing, + weights=weights, + input_tensor=input_tensor, + input_shape=input_shape, + pooling=pooling, + classes=classes, + classifier_activation=classifier_activation, + ) + + +@keras_core_export( + [ + "keras_core.applications.convnext.ConvNeXtSmall", + "keras_core.applications.ConvNeXtSmall", + ] +) +def ConvNeXtSmall( + model_name="convnext_small", + include_top=True, + include_preprocessing=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + return ConvNeXt( + depths=MODEL_CONFIGS["small"]["depths"], + projection_dims=MODEL_CONFIGS["small"]["projection_dims"], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=MODEL_CONFIGS["small"]["default_size"], + model_name=model_name, + include_top=include_top, + include_preprocessing=include_preprocessing, + weights=weights, + input_tensor=input_tensor, + input_shape=input_shape, + pooling=pooling, + classes=classes, + classifier_activation=classifier_activation, + ) + + +@keras_core_export( + [ + "keras_core.applications.convnext.ConvNeXtBase", + "keras_core.applications.ConvNeXtBase", + ] +) +def ConvNeXtBase( + model_name="convnext_base", + include_top=True, + include_preprocessing=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + return ConvNeXt( + depths=MODEL_CONFIGS["base"]["depths"], + projection_dims=MODEL_CONFIGS["base"]["projection_dims"], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=MODEL_CONFIGS["base"]["default_size"], + model_name=model_name, + include_top=include_top, + include_preprocessing=include_preprocessing, + weights=weights, + input_tensor=input_tensor, + input_shape=input_shape, + pooling=pooling, + classes=classes, + classifier_activation=classifier_activation, + ) + + +@keras_core_export( + [ + "keras_core.applications.convnext.ConvNeXtLarge", + "keras_core.applications.ConvNeXtLarge", + ] +) +def ConvNeXtLarge( + model_name="convnext_large", + include_top=True, + include_preprocessing=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + return ConvNeXt( + depths=MODEL_CONFIGS["large"]["depths"], + 
projection_dims=MODEL_CONFIGS["large"]["projection_dims"], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=MODEL_CONFIGS["large"]["default_size"], + model_name=model_name, + include_top=include_top, + include_preprocessing=include_preprocessing, + weights=weights, + input_tensor=input_tensor, + input_shape=input_shape, + pooling=pooling, + classes=classes, + classifier_activation=classifier_activation, + ) + + +@keras_core_export( + [ + "keras_core.applications.convnext.ConvNeXtXLarge", + "keras_core.applications.ConvNeXtXLarge", + ] +) +def ConvNeXtXLarge( + model_name="convnext_xlarge", + include_top=True, + include_preprocessing=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + return ConvNeXt( + depths=MODEL_CONFIGS["xlarge"]["depths"], + projection_dims=MODEL_CONFIGS["xlarge"]["projection_dims"], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + default_size=MODEL_CONFIGS["xlarge"]["default_size"], + model_name=model_name, + include_top=include_top, + include_preprocessing=include_preprocessing, + weights=weights, + input_tensor=input_tensor, + input_shape=input_shape, + pooling=pooling, + classes=classes, + classifier_activation=classifier_activation, + ) + + +ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny") +ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall") +ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase") +ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge") +ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge") + + +@keras_core_export("keras_core.applications.convnext.preprocess_input") +def preprocess_input(x, data_format=None): + """A placeholder method for backward compatibility. + + The preprocessing logic has been included in the convnext model + implementation. Users are no longer required to call this method to + normalize the input data. This method does nothing and is only kept as a + placeholder to align the API surface between the old and new versions of the model. + + Args: + x: A floating point `numpy.array` or a tensor. + data_format: Optional data format of the image tensor/array. Defaults to + None, in which case the global setting + `keras_core.backend.image_data_format()` is used + (unless you changed it, it defaults to `"channels_last"`). + + Returns: + Unchanged `numpy.array` or tensor.
+ """ + return x + + +@keras_core_export("keras_core.applications.convnext.decode_predictions") +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) + + +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ diff --git a/keras_core/applications/inception_resnet_v2.py b/keras_core/applications/inception_resnet_v2.py new file mode 100644 index 000000000..b4168a421 --- /dev/null +++ b/keras_core/applications/inception_resnet_v2.py @@ -0,0 +1,400 @@ +from tensorflow.io import gfile + +from keras_core import backend +from keras_core import layers +from keras_core.api_export import keras_core_export +from keras_core.applications import imagenet_utils +from keras_core.layers.layer import Layer +from keras_core.models import Functional +from keras_core.operations import operation_utils +from keras_core.utils import file_utils + +BASE_WEIGHT_URL = ( + "https://storage.googleapis.com/tensorflow/" + "keras-applications/inception_resnet_v2/" +) + + +@keras_core_export( + [ + "keras_core.applications.inception_resnet_v2.InceptionResNetV2", + "keras_core.applications.InceptionResNetV2", + ] +) +def InceptionResNetV2( + include_top=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + """Instantiates the Inception-ResNet v2 architecture. + + Reference: + - [Inception-v4, Inception-ResNet and the Impact of + Residual Connections on Learning](https://arxiv.org/abs/1602.07261) + (AAAI 2017) + + This function returns a Keras image classification model, + optionally loaded with weights pre-trained on ImageNet. + + For image classification use cases, see + [this page for detailed examples]( + https://keras.io/api/applications/#usage-examples-for-image-classification-models). + + For transfer learning use cases, make sure to read the + [guide to transfer learning & fine-tuning]( + https://keras.io/guides/transfer_learning/). + + Note: each Keras Application expects a specific kind of + input preprocessing. For InceptionResNetV2, call + `keras_core.applications.inception_resnet_v2.preprocess_input` + on your inputs before passing them to the model. + `inception_resnet_v2.preprocess_input` + will scale input pixels between -1 and 1. + + Args: + include_top: whether to include the fully-connected + layer at the top of the network. + weights: one of `None` (random initialization), + `"imagenet"` (pre-training on ImageNet), + or the path to the weights file to be loaded. + input_tensor: optional Keras tensor + (i.e. output of `layers.Input()`) + to use as image input for the model. + input_shape: optional shape tuple, only to be specified + if `include_top` is `False` (otherwise the input shape + has to be `(299, 299, 3)` + (with `'channels_last'` data format) + or `(3, 299, 299)` (with `'channels_first'` data format). + It should have exactly 3 inputs channels, + and width and height should be no smaller than 75. + E.g. `(150, 150, 3)` would be one valid value. + pooling: Optional pooling mode for feature extraction + when `include_top` is `False`. + - `None` means that the output of the model will be + the 4D tensor output of the last convolutional block. + - `'avg'` means that global average pooling + will be applied to the output of the + last convolutional block, and thus + the output of the model will be a 2D tensor. + - `'max'` means that global max pooling will be applied. 
+ classes: optional number of classes to classify images + into, only to be specified if `include_top` is `True`, + and if no `weights` argument is specified. + classifier_activation: A `str` or callable. + The activation function to use on the "top" layer. + Ignored unless `include_top=True`. + Set `classifier_activation=None` to return the logits + of the "top" layer. When loading pretrained weights, + `classifier_activation` can only be `None` or `"softmax"`. + + Returns: + A model instance. + """ + if not (weights in {"imagenet", None} or gfile.exists(weights)): + raise ValueError( + "The `weights` argument should be either " + "`None` (random initialization), `imagenet` " + "(pre-training on ImageNet), " + "or the path to the weights file to be loaded." + ) + + if weights == "imagenet" and include_top and classes != 1000: + raise ValueError( + 'If using `weights="imagenet"` with `include_top=True`, ' + "`classes` should be 1000. " + f"Received classes={classes}" + ) + + # Determine proper input shape + input_shape = imagenet_utils.obtain_input_shape( + input_shape, + default_size=299, + min_size=75, + data_format=backend.image_data_format(), + require_flatten=include_top, + weights=weights, + ) + + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + else: + if not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + + # Stem block: 35 x 35 x 192 + x = conv2d_bn(img_input, 32, 3, strides=2, padding="valid") + x = conv2d_bn(x, 32, 3, padding="valid") + x = conv2d_bn(x, 64, 3) + x = layers.MaxPooling2D(3, strides=2)(x) + x = conv2d_bn(x, 80, 1, padding="valid") + x = conv2d_bn(x, 192, 3, padding="valid") + x = layers.MaxPooling2D(3, strides=2)(x) + + # Mixed 5b (Inception-A block): 35 x 35 x 320 + branch_0 = conv2d_bn(x, 96, 1) + branch_1 = conv2d_bn(x, 48, 1) + branch_1 = conv2d_bn(branch_1, 64, 5) + branch_2 = conv2d_bn(x, 64, 1) + branch_2 = conv2d_bn(branch_2, 96, 3) + branch_2 = conv2d_bn(branch_2, 96, 3) + branch_pool = layers.AveragePooling2D(3, strides=1, padding="same")(x) + branch_pool = conv2d_bn(branch_pool, 64, 1) + branches = [branch_0, branch_1, branch_2, branch_pool] + channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 + x = layers.Concatenate(axis=channel_axis, name="mixed_5b")(branches) + + # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320 + for block_idx in range(1, 11): + x = inception_resnet_block( + x, scale=0.17, block_type="block35", block_idx=block_idx + ) + + # Mixed 6a (Reduction-A block): 17 x 17 x 1088 + branch_0 = conv2d_bn(x, 384, 3, strides=2, padding="valid") + branch_1 = conv2d_bn(x, 256, 1) + branch_1 = conv2d_bn(branch_1, 256, 3) + branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding="valid") + branch_pool = layers.MaxPooling2D(3, strides=2, padding="valid")(x) + branches = [branch_0, branch_1, branch_pool] + x = layers.Concatenate(axis=channel_axis, name="mixed_6a")(branches) + + # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088 + for block_idx in range(1, 21): + x = inception_resnet_block( + x, scale=0.1, block_type="block17", block_idx=block_idx + ) + + # Mixed 7a (Reduction-B block): 8 x 8 x 2080 + branch_0 = conv2d_bn(x, 256, 1) + branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding="valid") + branch_1 = conv2d_bn(x, 256, 1) + branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding="valid") + branch_2 = conv2d_bn(x, 256, 1) + branch_2 = conv2d_bn(branch_2, 288, 3) + branch_2 = 
conv2d_bn(branch_2, 320, 3, strides=2, padding="valid") + branch_pool = layers.MaxPooling2D(3, strides=2, padding="valid")(x) + branches = [branch_0, branch_1, branch_2, branch_pool] + x = layers.Concatenate(axis=channel_axis, name="mixed_7a")(branches) + + # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080 + for block_idx in range(1, 10): + x = inception_resnet_block( + x, scale=0.2, block_type="block8", block_idx=block_idx + ) + x = inception_resnet_block( + x, scale=1.0, activation=None, block_type="block8", block_idx=10 + ) + + # Final convolution block: 8 x 8 x 1536 + x = conv2d_bn(x, 1536, 1, name="conv_7b") + + if include_top: + # Classification block + x = layers.GlobalAveragePooling2D(name="avg_pool")(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense( + classes, activation=classifier_activation, name="predictions" + )(x) + else: + if pooling == "avg": + x = layers.GlobalAveragePooling2D()(x) + elif pooling == "max": + x = layers.GlobalMaxPooling2D()(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + + # Create model. + model = Functional(inputs, x, name="inception_resnet_v2") + + # Load weights. + if weights == "imagenet": + if include_top: + fname = "inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5" + weights_path = file_utils.get_file( + fname, + BASE_WEIGHT_URL + fname, + cache_subdir="models", + file_hash="e693bd0210a403b3192acc6073ad2e96", + ) + else: + fname = ( + "inception_resnet_v2_weights_" + "tf_dim_ordering_tf_kernels_notop.h5" + ) + weights_path = file_utils.get_file( + fname, + BASE_WEIGHT_URL + fname, + cache_subdir="models", + file_hash="d19885ff4a710c122648d3b5c3b684e4", + ) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + + return model + + +def conv2d_bn( + x, + filters, + kernel_size, + strides=1, + padding="same", + activation="relu", + use_bias=False, + name=None, +): + """Utility function to apply conv + BN. + + Args: + x: input tensor. + filters: filters in `Conv2D`. + kernel_size: kernel size as in `Conv2D`. + strides: strides in `Conv2D`. + padding: padding mode in `Conv2D`. + activation: activation in `Conv2D`. + use_bias: whether to use a bias in `Conv2D`. + name: name of the ops; will become `name + '_ac'` + for the activation and `name + '_bn'` for the batch norm layer. + + Returns: + Output tensor after applying `Conv2D` and `BatchNormalization`. + """ + x = layers.Conv2D( + filters, + kernel_size, + strides=strides, + padding=padding, + use_bias=use_bias, + name=name, + )(x) + if not use_bias: + bn_axis = 1 if backend.image_data_format() == "channels_first" else 3 + bn_name = None if name is None else name + "_bn" + x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)( + x + ) + if activation is not None: + ac_name = None if name is None else name + "_ac" + x = layers.Activation(activation, name=ac_name)(x) + return x + + +class CustomScaleLayer(Layer): + def __init__(self, scale, **kwargs): + super().__init__(**kwargs) + self.scale = scale + + def get_config(self): + config = super().get_config() + config.update({"scale": self.scale}) + return config + + def call(self, inputs): + return inputs[0] + inputs[1] * self.scale + + +def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): + """Adds an Inception-ResNet block. 
+ + Args: + x: input tensor. + scale: scaling factor to scale the residuals + (i.e., the output of passing `x` through an inception module) + before adding them to the shortcut + branch. Let `r` be the output from the residual branch, + the output of this block will be `x + scale * r`. + block_type: `'block35'`, `'block17'` or `'block8'`, + determines the network structure in the residual branch. + block_idx: an `int` used for generating layer names. + The Inception-ResNet blocks are repeated many times + in this network. We use `block_idx` to identify each + of the repetitions. For example, the first + Inception-ResNet-A block will have + `block_type='block35', block_idx=0`, and the layer names + will have a common prefix `'block35_0'`. + activation: activation function to use at the end of the block. + + Returns: + Output tensor for the block. + """ + if block_type == "block35": + branch_0 = conv2d_bn(x, 32, 1) + branch_1 = conv2d_bn(x, 32, 1) + branch_1 = conv2d_bn(branch_1, 32, 3) + branch_2 = conv2d_bn(x, 32, 1) + branch_2 = conv2d_bn(branch_2, 48, 3) + branch_2 = conv2d_bn(branch_2, 64, 3) + branches = [branch_0, branch_1, branch_2] + elif block_type == "block17": + branch_0 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(x, 128, 1) + branch_1 = conv2d_bn(branch_1, 160, [1, 7]) + branch_1 = conv2d_bn(branch_1, 192, [7, 1]) + branches = [branch_0, branch_1] + elif block_type == "block8": + branch_0 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(branch_1, 224, [1, 3]) + branch_1 = conv2d_bn(branch_1, 256, [3, 1]) + branches = [branch_0, branch_1] + else: + raise ValueError( + "Unknown Inception-ResNet block type. " + 'Expects "block35", "block17" or "block8", ' + "but got: " + str(block_type) + ) + + block_name = block_type + "_" + str(block_idx) + channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 + mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")( + branches + ) + up = conv2d_bn( + mixed, + x.shape[channel_axis], + 1, + activation=None, + use_bias=True, + name=block_name + "_conv", + ) + + x = CustomScaleLayer(scale)([x, up]) + if activation is not None: + x = layers.Activation(activation, name=block_name + "_ac")(x) + return x + + +@keras_core_export( + "keras_core.applications.inception_resnet_v2.preprocess_input" +) +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input( + x, data_format=data_format, mode="tf" + ) + + +@keras_core_export( + "keras_core.applications.inception_resnet_v2.decode_predictions" +) +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) + + +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format( + mode="", + ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, + error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC, +) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ diff --git a/keras_core/applications/inception_v3.py b/keras_core/applications/inception_v3.py new file mode 100644 index 000000000..35f7f0cee --- /dev/null +++ b/keras_core/applications/inception_v3.py @@ -0,0 +1,442 @@ +from tensorflow.io import gfile + +from keras_core import backend +from keras_core import layers +from keras_core.api_export import keras_core_export +from keras_core.applications import imagenet_utils +from keras_core.models import Functional +from keras_core.operations import operation_utils +from keras_core.utils import file_utils + +WEIGHTS_PATH = ( + 
"https://storage.googleapis.com/tensorflow/keras-applications/" + "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5" +) +WEIGHTS_PATH_NO_TOP = ( + "https://storage.googleapis.com/tensorflow/keras-applications/" + "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5" +) + + +@keras_core_export( + [ + "keras_core.applications.inception_v3.InceptionV3", + "keras_core.applications.InceptionV3", + ] +) +def InceptionV3( + include_top=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", +): + """Instantiates the Inception v3 architecture. + + Reference: + - [Rethinking the Inception Architecture for Computer Vision]( + http://arxiv.org/abs/1512.00567) (CVPR 2016) + + This function returns a Keras image classification model, + optionally loaded with weights pre-trained on ImageNet. + + For image classification use cases, see + [this page for detailed examples]( + https://keras.io/api/applications/#usage-examples-for-image-classification-models). + + For transfer learning use cases, make sure to read the + [guide to transfer learning & fine-tuning]( + https://keras.io/guides/transfer_learning/). + + Note: each Keras Application expects a specific kind of input preprocessing. + For `InceptionV3`, call + `keras_core.applications.inception_v3.preprocess_input` on your inputs + before passing them to the model. + `inception_v3.preprocess_input` will scale input pixels between -1 and 1. + + Args: + include_top: Boolean, whether to include the fully-connected + layer at the top, as the last layer of the network. + Defaults to `True`. + weights: One of `None` (random initialization), + `imagenet` (pre-training on ImageNet), + or the path to the weights file to be loaded. + Defaults to `"imagenet"`. + input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) + to use as image input for the model. `input_tensor` is useful for + sharing inputs between multiple different networks. + Defaults to `None`. + input_shape: Optional shape tuple, only to be specified + if `include_top` is False (otherwise the input shape + has to be `(299, 299, 3)` (with `channels_last` data format) + or `(3, 299, 299)` (with `channels_first` data format). + It should have exactly 3 inputs channels, + and width and height should be no smaller than 75. + E.g. `(150, 150, 3)` would be one valid value. + `input_shape` will be ignored if the `input_tensor` is provided. + pooling: Optional pooling mode for feature extraction + when `include_top` is `False`. + - `None` (default) means that the output of the model will be + the 4D tensor output of the last convolutional block. + - `avg` means that global average pooling + will be applied to the output of the + last convolutional block, and thus + the output of the model will be a 2D tensor. + - `max` means that global max pooling will be applied. + classes: optional number of classes to classify images + into, only to be specified if `include_top` is `True`, and + if no `weights` argument is specified. Defaults to 1000. + classifier_activation: A `str` or callable. The activation function + to use on the "top" layer. Ignored unless `include_top=True`. + Set `classifier_activation=None` to return the logits of the "top" + layer. When loading pretrained weights, `classifier_activation` + can only be `None` or `"softmax"`. + + Returns: + A model instance. 
+ """ + if not (weights in {"imagenet", None} or gfile.exists(weights)): + raise ValueError( + "The `weights` argument should be either " + "`None` (random initialization), `imagenet` " + "(pre-training on ImageNet), " + "or the path to the weights file to be loaded; " + f"Received: weights={weights}" + ) + + if weights == "imagenet" and include_top and classes != 1000: + raise ValueError( + 'If using `weights="imagenet"` with `include_top=True`, ' + "`classes` should be 1000. " + f"Received classes={classes}" + ) + + # Determine proper input shape + input_shape = imagenet_utils.obtain_input_shape( + input_shape, + default_size=299, + min_size=75, + data_format=backend.image_data_format(), + require_flatten=include_top, + weights=weights, + ) + + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + else: + if not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + + if backend.image_data_format() == "channels_first": + channel_axis = 1 + else: + channel_axis = 3 + + x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding="valid") + x = conv2d_bn(x, 32, 3, 3, padding="valid") + x = conv2d_bn(x, 64, 3, 3) + x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + + x = conv2d_bn(x, 80, 1, 1, padding="valid") + x = conv2d_bn(x, 192, 3, 3, padding="valid") + x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + + # mixed 0: 35 x 35 x 256 + branch1x1 = conv2d_bn(x, 64, 1, 1) + + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 32, 1, 1) + x = layers.concatenate( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], + axis=channel_axis, + name="mixed0", + ) + + # mixed 1: 35 x 35 x 288 + branch1x1 = conv2d_bn(x, 64, 1, 1) + + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 64, 1, 1) + x = layers.concatenate( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], + axis=channel_axis, + name="mixed1", + ) + + # mixed 2: 35 x 35 x 288 + branch1x1 = conv2d_bn(x, 64, 1, 1) + + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 64, 1, 1) + x = layers.concatenate( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], + axis=channel_axis, + name="mixed2", + ) + + # mixed 3: 17 x 17 x 768 + branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding="valid") + + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn( + branch3x3dbl, 96, 3, 3, strides=(2, 2), padding="valid" + ) + + branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + x = layers.concatenate( + [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name="mixed3" + ) 
+ + # mixed 4: 17 x 17 x 768 + branch1x1 = conv2d_bn(x, 192, 1, 1) + + branch7x7 = conv2d_bn(x, 128, 1, 1) + branch7x7 = conv2d_bn(branch7x7, 128, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + + branch7x7dbl = conv2d_bn(x, 128, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], + axis=channel_axis, + name="mixed4", + ) + + # mixed 5, 6: 17 x 17 x 768 + for i in range(2): + branch1x1 = conv2d_bn(x, 192, 1, 1) + + branch7x7 = conv2d_bn(x, 160, 1, 1) + branch7x7 = conv2d_bn(branch7x7, 160, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + + branch7x7dbl = conv2d_bn(x, 160, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], + axis=channel_axis, + name="mixed" + str(5 + i), + ) + + # mixed 7: 17 x 17 x 768 + branch1x1 = conv2d_bn(x, 192, 1, 1) + + branch7x7 = conv2d_bn(x, 192, 1, 1) + branch7x7 = conv2d_bn(branch7x7, 192, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + + branch7x7dbl = conv2d_bn(x, 192, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], + axis=channel_axis, + name="mixed7", + ) + + # mixed 8: 8 x 8 x 1280 + branch3x3 = conv2d_bn(x, 192, 1, 1) + branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding="valid") + + branch7x7x3 = conv2d_bn(x, 192, 1, 1) + branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7) + branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1) + branch7x7x3 = conv2d_bn( + branch7x7x3, 192, 3, 3, strides=(2, 2), padding="valid" + ) + + branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + x = layers.concatenate( + [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name="mixed8" + ) + + # mixed 9: 8 x 8 x 2048 + for i in range(2): + branch1x1 = conv2d_bn(x, 320, 1, 1) + + branch3x3 = conv2d_bn(x, 384, 1, 1) + branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3) + branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1) + branch3x3 = layers.concatenate( + [branch3x3_1, branch3x3_2], + axis=channel_axis, + name="mixed9_" + str(i), + ) + + branch3x3dbl = conv2d_bn(x, 448, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3) + branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3) + branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1) + branch3x3dbl = layers.concatenate( + [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis + ) + + branch_pool = layers.AveragePooling2D( + (3, 3), strides=(1, 1), padding="same" + )(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate( + [branch1x1, branch3x3, 
branch3x3dbl, branch_pool], + axis=channel_axis, + name="mixed" + str(9 + i), + ) + if include_top: + # Classification block + x = layers.GlobalAveragePooling2D(name="avg_pool")(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense( + classes, activation=classifier_activation, name="predictions" + )(x) + else: + if pooling == "avg": + x = layers.GlobalAveragePooling2D()(x) + elif pooling == "max": + x = layers.GlobalMaxPooling2D()(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + # Create model. + model = Functional(inputs, x, name="inception_v3") + + # Load weights. + if weights == "imagenet": + if include_top: + weights_path = file_utils.get_file( + "inception_v3_weights_tf_dim_ordering_tf_kernels.h5", + WEIGHTS_PATH, + cache_subdir="models", + file_hash="9a0d58056eeedaa3f26cb7ebd46da564", + ) + else: + weights_path = file_utils.get_file( + "inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5", + WEIGHTS_PATH_NO_TOP, + cache_subdir="models", + file_hash="bcbd6486424b2319ff4ef7d526e38f63", + ) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + + return model + + +def conv2d_bn( + x, filters, num_row, num_col, padding="same", strides=(1, 1), name=None +): + """Utility function to apply conv + BN. + + Args: + x: input tensor. + filters: filters in `Conv2D`. + num_row: height of the convolution kernel. + num_col: width of the convolution kernel. + padding: padding mode in `Conv2D`. + strides: strides in `Conv2D`. + name: name of the ops; will become `name + '_conv'` + for the convolution and `name + '_bn'` for the + batch norm layer. + + Returns: + Output tensor after applying `Conv2D` and `BatchNormalization`. 
+ """ + if name is not None: + bn_name = name + "_bn" + conv_name = name + "_conv" + else: + bn_name = None + conv_name = None + if backend.image_data_format() == "channels_first": + bn_axis = 1 + else: + bn_axis = 3 + x = layers.Conv2D( + filters, + (num_row, num_col), + strides=strides, + padding=padding, + use_bias=False, + name=conv_name, + )(x) + x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) + x = layers.Activation("relu", name=name)(x) + return x + + +@keras_core_export("keras_core.applications.inception_v3.preprocess_input") +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input( + x, data_format=data_format, mode="tf" + ) + + +@keras_core_export("keras_core.applications.inception_v3.decode_predictions") +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) + + +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format( + mode="", + ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, + error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC, +) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ diff --git a/keras_core/layers/reshaping/cropping1d.py b/keras_core/layers/reshaping/cropping1d.py index 8b30b9008..a52a05310 100644 --- a/keras_core/layers/reshaping/cropping1d.py +++ b/keras_core/layers/reshaping/cropping1d.py @@ -1,7 +1,6 @@ from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.Cropping1D") @@ -43,9 +42,9 @@ class Cropping1D(Layer): def __init__(self, cropping=(1, 1), name=None, dtype=None): super().__init__(name=name, dtype=dtype) - self.cropping = argument_validation.standardize_tuple( - cropping, 2, "cropping", allow_zero=True - ) + if isinstance(cropping, int): + cropping = (cropping, cropping) + self.cropping = cropping self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): diff --git a/keras_core/layers/reshaping/cropping1d_test.py b/keras_core/layers/reshaping/cropping1d_test.py index ec561660b..faa46e48b 100644 --- a/keras_core/layers/reshaping/cropping1d_test.py +++ b/keras_core/layers/reshaping/cropping1d_test.py @@ -56,14 +56,6 @@ class Cropping1DTest(testing.TestCase): cropped = layers.Cropping1D((1, 2))(input_layer) self.assertEqual(cropped.shape, (1, None, 7)) - def test_cropping_1d_errors_if_cropping_argument_invalid(self): - with self.assertRaises(ValueError): - layers.Cropping1D(cropping=(1,)) - with self.assertRaises(ValueError): - layers.Cropping1D(cropping=(1, 2, 3)) - with self.assertRaises(ValueError): - layers.Cropping1D(cropping="1") - def test_cropping_1d_errors_if_cropping_more_than_available(self): with self.assertRaises(ValueError): input_layer = layers.Input(batch_shape=(3, 5, 7)) diff --git a/keras_core/layers/reshaping/cropping2d.py b/keras_core/layers/reshaping/cropping2d.py index 18faf95a5..d1fa0f77a 100644 --- a/keras_core/layers/reshaping/cropping2d.py +++ b/keras_core/layers/reshaping/cropping2d.py @@ -2,7 +2,6 @@ from keras_core import backend from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.Cropping2D") @@ -66,12 +65,12 @@ class Cropping2D(Layer): "`cropping` should have two elements. " f"Received: cropping={cropping}." 
) - height_cropping = argument_validation.standardize_tuple( - cropping[0], 2, "1st entry of cropping", allow_zero=True - ) - width_cropping = argument_validation.standardize_tuple( - cropping[1], 2, "2nd entry of cropping", allow_zero=True - ) + height_cropping = cropping[0] + if isinstance(height_cropping, int): + height_cropping = (height_cropping, height_cropping) + width_cropping = cropping[1] + if isinstance(width_cropping, int): + width_cropping = (width_cropping, width_cropping) self.cropping = (height_cropping, width_cropping) else: raise ValueError( diff --git a/keras_core/layers/reshaping/cropping2d_test.py b/keras_core/layers/reshaping/cropping2d_test.py index 86b1608e8..29431d769 100644 --- a/keras_core/layers/reshaping/cropping2d_test.py +++ b/keras_core/layers/reshaping/cropping2d_test.py @@ -97,9 +97,3 @@ class Cropping2DTest(testing.TestCase, parameterized.TestCase): layers.Cropping2D(cropping=(1, 2, 3)) with self.assertRaises(ValueError): layers.Cropping2D(cropping="1") - with self.assertRaises(ValueError): - layers.Cropping2D(cropping=((1, 2), (3, 4, 5))) - with self.assertRaises(ValueError): - layers.Cropping2D(cropping=((1, 2), (3, -4))) - with self.assertRaises(ValueError): - layers.Cropping2D(cropping=((1, 2), "3")) diff --git a/keras_core/layers/reshaping/cropping3d.py b/keras_core/layers/reshaping/cropping3d.py index cf268bb70..542f55886 100644 --- a/keras_core/layers/reshaping/cropping3d.py +++ b/keras_core/layers/reshaping/cropping3d.py @@ -2,7 +2,6 @@ from keras_core import backend from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.Cropping3D") @@ -76,15 +75,15 @@ class Cropping3D(Layer): raise ValueError( f"`cropping` should have 3 elements. Received: {cropping}." 
) - dim1_cropping = argument_validation.standardize_tuple( - cropping[0], 2, "1st entry of cropping", allow_zero=True - ) - dim2_cropping = argument_validation.standardize_tuple( - cropping[1], 2, "2nd entry of cropping", allow_zero=True - ) - dim3_cropping = argument_validation.standardize_tuple( - cropping[2], 2, "3rd entry of cropping", allow_zero=True - ) + dim1_cropping = cropping[0] + if isinstance(dim1_cropping, int): + dim1_cropping = (dim1_cropping, dim1_cropping) + dim2_cropping = cropping[1] + if isinstance(dim2_cropping, int): + dim2_cropping = (dim2_cropping, dim2_cropping) + dim3_cropping = cropping[2] + if isinstance(dim3_cropping, int): + dim3_cropping = (dim3_cropping, dim3_cropping) self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping) else: raise ValueError( diff --git a/keras_core/layers/reshaping/cropping3d_test.py b/keras_core/layers/reshaping/cropping3d_test.py index 0432ad4b8..67c780706 100644 --- a/keras_core/layers/reshaping/cropping3d_test.py +++ b/keras_core/layers/reshaping/cropping3d_test.py @@ -159,9 +159,3 @@ class Cropping3DTest(testing.TestCase, parameterized.TestCase): layers.Cropping3D(cropping=(1, 2, 3, 4)) with self.assertRaises(ValueError): layers.Cropping3D(cropping="1") - with self.assertRaises(ValueError): - layers.Cropping3D(cropping=((1, 2), (3, 4), (5, 6, 7))) - with self.assertRaises(ValueError): - layers.Cropping3D(cropping=((1, 2), (3, 4), (5, -6))) - with self.assertRaises(ValueError): - layers.Cropping3D(cropping=((1, 2), (3, 4), "5")) diff --git a/keras_core/layers/reshaping/zero_padding1d.py b/keras_core/layers/reshaping/zero_padding1d.py index 73c94d346..b0a51e733 100644 --- a/keras_core/layers/reshaping/zero_padding1d.py +++ b/keras_core/layers/reshaping/zero_padding1d.py @@ -2,7 +2,6 @@ from keras_core import operations as ops from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer -from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.ZeroPadding1D") @@ -49,9 +48,9 @@ class ZeroPadding1D(Layer): def __init__(self, padding=1, name=None, dtype=None): super().__init__(name=name, dtype=dtype) - self.padding = argument_validation.standardize_tuple( - padding, 2, "padding", allow_zero=True - ) + if isinstance(padding, int): + padding = (padding, padding) + self.padding = padding self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): diff --git a/keras_core/layers/reshaping/zero_padding1d_test.py b/keras_core/layers/reshaping/zero_padding1d_test.py index abe2f700f..182e5c46b 100644 --- a/keras_core/layers/reshaping/zero_padding1d_test.py +++ b/keras_core/layers/reshaping/zero_padding1d_test.py @@ -33,11 +33,3 @@ class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase): input_layer = layers.Input(batch_shape=(1, None, 3)) padded = layers.ZeroPadding1D((1, 2))(input_layer) self.assertEqual(padded.shape, (1, None, 3)) - - def test_zero_padding_1d_errors_if_padding_argument_invalid(self): - with self.assertRaises(ValueError): - layers.ZeroPadding1D(padding=(1,)) - with self.assertRaises(ValueError): - layers.ZeroPadding1D(padding=(1, 2, 3)) - with self.assertRaises(ValueError): - layers.ZeroPadding1D(padding="1") diff --git a/keras_core/layers/reshaping/zero_padding2d.py b/keras_core/layers/reshaping/zero_padding2d.py index 1131737fb..a1bd92b40 100644 --- a/keras_core/layers/reshaping/zero_padding2d.py +++ b/keras_core/layers/reshaping/zero_padding2d.py @@ -3,7 +3,6 @@ from 
diff --git a/keras_core/layers/reshaping/zero_padding2d.py b/keras_core/layers/reshaping/zero_padding2d.py
index 1131737fb..a1bd92b40 100644
--- a/keras_core/layers/reshaping/zero_padding2d.py
+++ b/keras_core/layers/reshaping/zero_padding2d.py
@@ -3,7 +3,6 @@ from keras_core import operations as ops
 from keras_core.api_export import keras_core_export
 from keras_core.layers.input_spec import InputSpec
 from keras_core.layers.layer import Layer
-from keras_core.utils import argument_validation
 
 
 @keras_core_export("keras_core.layers.ZeroPadding2D")
@@ -79,12 +78,12 @@ class ZeroPadding2D(Layer):
                     "`padding` should have two elements. "
                     f"Received: padding={padding}."
                 )
-            height_padding = argument_validation.standardize_tuple(
-                padding[0], 2, "1st entry of padding", allow_zero=True
-            )
-            width_padding = argument_validation.standardize_tuple(
-                padding[1], 2, "2nd entry of padding", allow_zero=True
-            )
+            height_padding = padding[0]
+            if isinstance(height_padding, int):
+                height_padding = (height_padding, height_padding)
+            width_padding = padding[1]
+            if isinstance(width_padding, int):
+                width_padding = (width_padding, width_padding)
             self.padding = (height_padding, width_padding)
         else:
             raise ValueError(
diff --git a/keras_core/layers/reshaping/zero_padding2d_test.py b/keras_core/layers/reshaping/zero_padding2d_test.py
index 7a2acc9ad..a31893cf0 100644
--- a/keras_core/layers/reshaping/zero_padding2d_test.py
+++ b/keras_core/layers/reshaping/zero_padding2d_test.py
@@ -74,9 +74,3 @@ class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
             layers.ZeroPadding2D(padding=(1, 2, 3))
         with self.assertRaises(ValueError):
             layers.ZeroPadding2D(padding="1")
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding2D(padding=((1, 2), (3, 4, 5)))
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding2D(padding=((1, 2), (3, -4)))
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding2D(padding=((1, 2), "3"))
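For the 2D case the accepted argument forms are unchanged (an int, a pair of ints, or a pair of pairs); only the eager per-entry validation goes away. A shape sketch under the default `channels_last` data format, with illustrative values:

```python
import numpy as np

from keras_core import layers

x = np.zeros((1, 4, 4, 3))  # (batch, height, width, channels)
# ((top, bottom), (left, right)); int entries would expand to symmetric pairs.
y = layers.ZeroPadding2D(padding=((1, 2), (3, 4)))(x)
print(y.shape)  # expected (1, 7, 11, 3)
```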
diff --git a/keras_core/layers/reshaping/zero_padding3d.py b/keras_core/layers/reshaping/zero_padding3d.py
index 2baee1884..021c381a8 100644
--- a/keras_core/layers/reshaping/zero_padding3d.py
+++ b/keras_core/layers/reshaping/zero_padding3d.py
@@ -3,7 +3,6 @@ from keras_core import operations as ops
 from keras_core.api_export import keras_core_export
 from keras_core.layers.input_spec import InputSpec
 from keras_core.layers.layer import Layer
-from keras_core.utils import argument_validation
 
 
 @keras_core_export("keras_core.layers.ZeroPadding3D")
@@ -77,15 +76,15 @@ class ZeroPadding3D(Layer):
             raise ValueError(
                 f"`padding` should have 3 elements. Received: {padding}."
             )
-            dim1_padding = argument_validation.standardize_tuple(
-                padding[0], 2, "1st entry of padding", allow_zero=True
-            )
-            dim2_padding = argument_validation.standardize_tuple(
-                padding[1], 2, "2nd entry of padding", allow_zero=True
-            )
-            dim3_padding = argument_validation.standardize_tuple(
-                padding[2], 2, "3rd entry of padding", allow_zero=True
-            )
+            dim1_padding = padding[0]
+            if isinstance(dim1_padding, int):
+                dim1_padding = (dim1_padding, dim1_padding)
+            dim2_padding = padding[1]
+            if isinstance(dim2_padding, int):
+                dim2_padding = (dim2_padding, dim2_padding)
+            dim3_padding = padding[2]
+            if isinstance(dim3_padding, int):
+                dim3_padding = (dim3_padding, dim3_padding)
             self.padding = (dim1_padding, dim2_padding, dim3_padding)
         else:
             raise ValueError(
diff --git a/keras_core/layers/reshaping/zero_padding3d_test.py b/keras_core/layers/reshaping/zero_padding3d_test.py
index 7c32fb0f3..22d8b5d3d 100644
--- a/keras_core/layers/reshaping/zero_padding3d_test.py
+++ b/keras_core/layers/reshaping/zero_padding3d_test.py
@@ -82,9 +82,3 @@ class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase):
             layers.ZeroPadding3D(padding=(1, 2, 3, 4))
         with self.assertRaises(ValueError):
             layers.ZeroPadding3D(padding="1")
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding3D(padding=((1, 2), (3, 4), (5, 6, 7)))
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding3D(padding=((1, 2), (3, 4), (5, -6)))
-        with self.assertRaises(ValueError):
-            layers.ZeroPadding3D(padding=((1, 2), (3, 4), "5"))
diff --git a/keras_core/models/functional.py b/keras_core/models/functional.py
index c8be866a6..60d0cc1aa 100644
--- a/keras_core/models/functional.py
+++ b/keras_core/models/functional.py
@@ -540,7 +540,7 @@ def deserialize_node(node_data, created_layers):
             if layer is None:
                 raise ValueError(f"Unknown layer: {history[0]}")
             inbound_node_index = history[1]
-            inbound_tensor_index = history[1]
+            inbound_tensor_index = history[2]
             if len(layer._inbound_nodes) <= inbound_node_index:
                 raise ValueError(
                     "Layer node index out of bounds.\n"
diff --git a/keras_core/models/sequential.py b/keras_core/models/sequential.py
index be4e141ed..49725c1ee 100644
--- a/keras_core/models/sequential.py
+++ b/keras_core/models/sequential.py
@@ -112,11 +112,6 @@ class Sequential(Model):
         self._functional = Functional(inputs=inputs, outputs=outputs)
         self.built = True
 
-    def __call__(self, inputs, training=None, mask=None):
-        if self._functional:
-            return self._functional(inputs, training=training, mask=mask)
-        return super().__call__(inputs, training=training, mask=mask)
-
     def call(self, inputs, training=None, mask=None):
         if self._functional:
             return self._functional.call(inputs, training=training, mask=mask)
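The `functional.py` change fixes node deserialization: the serialized history is a `(layer_name, inbound_node_index, inbound_tensor_index)` triple, so the tensor index must be read from position 2 rather than repeating position 1. A toy illustration of why the old indexing only happened to work for single-output layers; the values are made up for demonstration:

```python
# Illustrative values only; real histories are produced during model serialization.
history = ("dense_1", 0, 1)  # (layer_name, inbound_node_index, inbound_tensor_index)

layer_name = history[0]
inbound_node_index = history[1]
inbound_tensor_index = history[2]  # previously history[1]: wrong whenever a layer has >1 output

assert (layer_name, inbound_node_index, inbound_tensor_index) == ("dense_1", 0, 1)
```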