Merge pull request #16617 from haifeng-jin:flake8

PiperOrigin-RevId: 451558871
Author: TensorFlower Gardener
Date:   2022-05-27 21:15:05 -07:00
Commit: 059781f3b0
65 changed files with 266 additions and 255 deletions
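
A note on the pattern repeated throughout this diff: flake8's E501 check flags any physical line longer than the configured limit, and a trailing "# noqa: E501" comment suppresses exactly that one check on exactly that one physical line. A minimal sketch of the mechanism, using a hypothetical file and variable name not taken from this commit:

# Illustrative only. Without a suppression, flake8 reports the long
# assignment with a message of the form:
#   example.py:5:80: E501 line too long (105 > 79 characters)
long_url = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/some-weights.h5"
# The same line with a targeted suppression passes; only E501 is
# silenced, and only on this line. Other checks still apply:
long_url = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/some-weights.h5" # noqa: E501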

@@ -34,7 +34,7 @@ from keras.engine import training
from keras.utils import data_utils
from keras.utils import layer_utils
-BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/"
+BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/" # noqa: E501
WEIGHTS_HASHES = {
"b0": (

@@ -1319,19 +1319,19 @@ class KerasCallbacksTest(test_combinations.TestCase):
return func
-test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(
+test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = get_ModelCheckpoint_load_weights_on_restart_true_test.__func__( # noqa: E501
True
)
-test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(
+test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = get_ModelCheckpoint_load_weights_on_restart_true_test.__func__( # noqa: E501
False
)
-test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(
+test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__( # noqa: E501
True
)
-test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(
+test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__( # noqa: E501
False
)

@@ -59,7 +59,7 @@ def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
path = get_file(
path,
origin=origin_folder + "boston_housing.npz",
-file_hash="f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5",
+file_hash="f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5", # noqa: E501
)
with np.load(
path, allow_pickle=True

@@ -80,7 +80,7 @@ def load_data():
dirname,
origin=origin,
untar=True,
-file_hash="6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce",
+file_hash="6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce", # noqa: E501
)
num_train_samples = 50000

@@ -77,7 +77,7 @@ def load_data(label_mode="fine"):
dirname,
origin=origin,
untar=True,
-file_hash="85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7",
+file_hash="85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7", # noqa: E501
)
fpath = os.path.join(path, "train")

@@ -109,7 +109,7 @@ def load_data(
path = get_file(
path,
origin=origin_folder + "imdb.npz",
-file_hash="69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f",
+file_hash="69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f", # noqa: E501
)
with np.load(
path, allow_pickle=True

@@ -73,7 +73,7 @@ def load_data(path="mnist.npz"):
path = get_file(
path,
origin=origin_folder + "mnist.npz",
-file_hash="731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1",
+file_hash="731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1", # noqa: E501
)
with np.load(
path, allow_pickle=True

@@ -115,7 +115,7 @@ def load_data(
path = get_file(
path,
origin=origin_folder + "reuters.npz",
-file_hash="d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916",
+file_hash="d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916", # noqa: E501
)
with np.load(
path, allow_pickle=True

@@ -25,11 +25,11 @@ class TrainingCheckpointTests(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.tpu_strategy,
-tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
-tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.tpu_strategy, # noqa: E501
+tf.__internal__.distribute.combinations.tpu_strategy_packed_var, # noqa: E501
+tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
@@ -87,12 +87,12 @@ class TrainingCheckpointTests(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.cloud_tpu_strategy,
-tf.__internal__.distribute.combinations.tpu_strategy,
-tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
-tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.cloud_tpu_strategy, # noqa: E501
+tf.__internal__.distribute.combinations.tpu_strategy, # noqa: E501
+tf.__internal__.distribute.combinations.tpu_strategy_packed_var, # noqa: E501
+tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)

@@ -29,8 +29,8 @@ from keras.testing_infra import test_utils
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=[
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501
],
mode=["eager"],
)

@@ -271,7 +271,7 @@ class TestDistributionStrategyDnnCorrectness(
 + tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
optimizer_fn=[
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
@@ -351,7 +351,7 @@ class TestDistributionStrategyDnnCorrectness(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)

@@ -68,7 +68,7 @@ class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
def step_fn(grads):
optimizer.apply_gradients(
[(grads, v)],
-experimental_aggregate_gradients=experimental_aggregate_gradients,
+experimental_aggregate_gradients=experimental_aggregate_gradients, # noqa: E501
)
return v.read_value()
@@ -80,7 +80,7 @@ class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
-distribution=tf.__internal__.distribute.combinations.one_device_strategy,
+distribution=tf.__internal__.distribute.combinations.one_device_strategy, # noqa: E501
mode=["eager"],
experimental_aggregate_gradients=[True, False],
)
@@ -100,7 +100,7 @@ class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
def step_fn(grads):
optimizer.apply_gradients(
[(grads, v)],
-experimental_aggregate_gradients=experimental_aggregate_gradients,
+experimental_aggregate_gradients=experimental_aggregate_gradients, # noqa: E501
)
return v.read_value()
@@ -113,7 +113,7 @@ class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu
+tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu # noqa: E501
]
)
)

@@ -254,8 +254,8 @@ def all_strategy_minus_default_and_tpu_combinations():
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -1434,7 +1434,7 @@ class TestDistributionStrategyWithDatasets(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=["graph", "eager"],
@@ -1467,7 +1467,7 @@ class TestDistributionStrategyWithDatasets(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu # noqa: E501
],
mode=["graph", "eager"],
)
@@ -1492,8 +1492,8 @@ class TestDistributionStrategyWithDatasets(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -2309,8 +2309,8 @@ class TestDistributionStrategyWithKerasModels(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
reduction=[
@@ -2476,8 +2476,8 @@ class TestDistributionStrategyWithKerasModels(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
@@ -3011,7 +3011,7 @@ class TestModelCapturesStrategy(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
-distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
+distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
mode=["eager"],
)
)

@@ -115,7 +115,7 @@ class TestDistributionStrategyDnnCorrectness(
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@tf.__internal__.distribute.combinations.generate(
-keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()
+keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness_with_partial_last_batch_eval(
@@ -129,7 +129,7 @@ class TestDistributionStrategyDnnCorrectness(
)
@tf.__internal__.distribute.combinations.generate(
-keras_correctness_test_base.strategy_minus_tpu_and_input_config_combinations_eager()
+keras_correctness_test_base.strategy_minus_tpu_and_input_config_combinations_eager() # noqa: E501
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness_with_partial_last_batch(
@@ -354,7 +354,7 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
self.run_dynamic_lr_test(distribution)
@tf.__internal__.distribute.combinations.generate(
-keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()
+keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
def test_dnn_correctness_with_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data

@@ -25,7 +25,7 @@ from keras.optimizers.optimizer_v2 import (
class DistributionStrategyEmbeddingModelCorrectnessTest(
-keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase
+keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,
@@ -83,7 +83,7 @@ class DistributionStrategyEmbeddingModelCorrectnessTest(
class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
-keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase
+keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,

@@ -106,7 +106,7 @@ class DistributionStrategyCnnCorrectnessTest(
):
if (
distribution
-== tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu
+== tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu # noqa: E501
):
self.skipTest("b/183958183")
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@@ -140,9 +140,9 @@ class DistributionStrategyCnnCorrectnessTest(
)
@tf.__internal__.distribute.combinations.generate(
-keras_correctness_test_base.all_strategy_and_input_config_combinations_eager()
+keras_correctness_test_base.all_strategy_and_input_config_combinations_eager() # noqa: E501
 + keras_correctness_test_base.multi_worker_mirrored_eager()
-+ keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()
++ keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
def test_cnn_correctness_with_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data
@@ -156,9 +156,9 @@ class DistributionStrategyCnnCorrectnessTest(
)
@tf.__internal__.distribute.combinations.generate(
-keras_correctness_test_base.all_strategy_and_input_config_combinations_eager()
+keras_correctness_test_base.all_strategy_and_input_config_combinations_eager() # noqa: E501
 + keras_correctness_test_base.multi_worker_mirrored_eager()
-+ keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()
++ keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data

@@ -34,7 +34,7 @@ class MirroredStrategyOptimizerV2Test(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -96,7 +96,7 @@ class MirroredStrategyOptimizerV2Test(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)

@@ -33,14 +33,14 @@ def strategy_combinations_eager_data_fn():
tf.__internal__.distribute.combinations.default_strategy,
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu,
-tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_cpu,
-tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_1gpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu, # noqa: E501
+tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_1gpu, # noqa: E501
# NOTE: TPUStrategy not tested because the models in this test are
# sparse and do not work with TPUs.
],

@@ -31,7 +31,7 @@ from keras.testing_infra import test_utils
class _DistributionStrategyRnnModelCorrectnessTest(
-keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase
+keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def _get_layer_class(self):
raise NotImplementedError

@@ -42,7 +42,7 @@ def test_combinations_for_stateful_embedding_model():
class DistributionStrategyStatefulLstmModelCorrectnessTest(
-keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase
+keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,
@@ -97,7 +97,7 @@ class DistributionStrategyStatefulLstmModelCorrectnessTest(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
-keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()
+keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
)
def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(

@@ -197,7 +197,7 @@ class TestDistributionStrategyErrorCases(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph"],
)
@@ -227,14 +227,14 @@ class TestDistributionStrategyErrorCases(
"PerReplica:.+",
):
with distribution.scope():
-distributed_training_utils_v1.validate_distributed_dataset_inputs(
+distributed_training_utils_v1.validate_distributed_dataset_inputs( # noqa: E501
distribution, x, None
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -264,14 +264,14 @@ class TestDistributionStrategyErrorCases(
"PerReplica:.+",
):
with distribution.scope():
-distributed_training_utils_v1.validate_distributed_dataset_inputs(
+distributed_training_utils_v1.validate_distributed_dataset_inputs( # noqa: E501
distribution, x, None
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -322,7 +322,7 @@ class TestDistributionStrategyErrorCases(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=["graph", "eager"],
@@ -355,7 +355,7 @@ class TestDistributionStrategyErrorCases(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=["graph", "eager"],
@@ -406,10 +406,10 @@ class TestDistributionStrategyWithLossMasking(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph", "eager"],
-optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
+optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, # noqa: E501
)
)
def test_masking(self, distribution, optimizer):
@@ -443,7 +443,7 @@ class TestDistributionStrategyWithNormalizationLayer(
keras_test_lib.all_strategy_combinations(),
tf.__internal__.test.combinations.combine(
fused=[True, False],
-optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
+optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, # noqa: E501
),
)
)
@@ -489,7 +489,7 @@ class TestDistributionStrategyWithNormalizationLayer(
tf.__internal__.test.combinations.times(
keras_test_lib.tpu_strategy_combinations(),
tf.__internal__.test.combinations.combine(
-optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn
+optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn # noqa: E501
),
)
)
@@ -653,7 +653,7 @@ class TestDistributionStrategyWithStaticShapes(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph", "eager"],
)
@@ -670,7 +670,7 @@ class TestDistributionStrategyWithStaticShapes(
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["graph", "eager"],
)

@@ -388,15 +388,15 @@ class MinimizeLossStepTest(tf.test.TestCase, parameterized.TestCase):
tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.one_device_strategy,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,
+tf.__internal__.distribute.combinations.one_device_strategy, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
]
),
tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
-optimizer_fn=optimizer_combinations.gradient_descent_optimizer_v1_fn
+optimizer_fn=optimizer_combinations.gradient_descent_optimizer_v1_fn # noqa: E501
),
tf.__internal__.test.combinations.combine(
mode=["graph"], use_callable_loss=[True, False]
@@ -407,7 +407,7 @@ class MinimizeLossStepTest(tf.test.TestCase, parameterized.TestCase):
)
 + tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
-optimizer_fn=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn
+optimizer_fn=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn # noqa: E501
),
tf.__internal__.test.combinations.combine(
mode=["graph", "eager"], use_callable_loss=[True]
@@ -418,7 +418,7 @@ class MinimizeLossStepTest(tf.test.TestCase, parameterized.TestCase):
distribution=[
tf.__internal__.distribute.combinations.tpu_strategy
],
-optimizer_fn=optimizer_combinations.gradient_descent_optimizer_v1_fn,
+optimizer_fn=optimizer_combinations.gradient_descent_optimizer_v1_fn, # noqa: E501
mode=["graph"],
use_callable_loss=[True, False],
)
@@ -426,7 +426,7 @@ class MinimizeLossStepTest(tf.test.TestCase, parameterized.TestCase):
distribution=[
tf.__internal__.distribute.combinations.tpu_strategy
],
-optimizer_fn=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
+optimizer_fn=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, # noqa: E501
mode=["graph"],
use_callable_loss=[True],
),

@@ -49,7 +49,7 @@ class MiniModel(keras_training.Model):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
],
mode=["eager"],
)

@@ -51,7 +51,7 @@ def get_strategy_with_mimicing_cpus():
filter(
None.__ne__,
[
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
get_strategy_with_mimicing_cpus(),
],
)

@@ -159,7 +159,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, file_format),
@@ -192,7 +192,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
@@ -263,7 +263,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
@@ -306,7 +306,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_profiler_saves_on_both_chief_and_non_chief,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
@@ -357,7 +357,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_saves_on_chief_but_not_otherwise,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
@@ -395,7 +395,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
@@ -432,7 +432,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_works_with_same_file_path,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
@@ -466,7 +466,7 @@ class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
tf.__internal__.distribute.multi_process_runner.run(
proc_early_stopping,
-cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),

@@ -194,8 +194,8 @@ class KerasMultiWorkerTestIndependentWorker(
tf.__internal__.test.combinations.combine(
mode=["eager"],
strategy=[
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501
],
)
)
@@ -236,7 +236,7 @@ class KPLMultiWorkerTest(tf.test.TestCase, parameterized.TestCase):
mode=["eager"],
use_adapt=[False], # TODO(b/180742437): Add tests for using adapt.
strategy=[
-tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
+tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501
# TODO(b/183956672): Re-enable
# strategy_combinations.multi_worker_mirrored_2x2_gpu,
],

@@ -100,9 +100,9 @@ def distributions_and_v1_optimizers():
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v1,
)
@@ -114,9 +114,9 @@ def distributions_and_v2_optimizers():
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v2,
)
@@ -128,9 +128,9 @@ def distributions_and_v1_and_v2_optimizers():
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
+tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v1_and_v2,
)

@@ -49,7 +49,7 @@ strategies = [
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
tf.__internal__.distribute.combinations.tpu_strategy,
tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
-tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus,
+tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
]

@@ -30,7 +30,7 @@ class ShardedVariableTest(tf.test.TestCase, parameterized.TestCase):
super().setUpClass()
cls.strategy = tf.distribute.experimental.ParameterServerStrategy(
multi_worker_testing_utils.make_parameter_server_cluster(3, 2),
-variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(
+variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501
2
),
)
@@ -184,7 +184,7 @@ class ShardedVariableTest(tf.test.TestCase, parameterized.TestCase):
if shard_config[0] > 2:
strategy = tf.distribute.experimental.ParameterServerStrategy(
multi_worker_testing_utils.make_parameter_server_cluster(3, 3),
-variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(
+variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501
shard_config[0]
),
)
@@ -217,7 +217,7 @@ class ShardedVariableTest(tf.test.TestCase, parameterized.TestCase):
if shard_config[1] > 2:
strategy2 = tf.distribute.experimental.ParameterServerStrategy(
multi_worker_testing_utils.make_parameter_server_cluster(3, 3),
-variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(
+variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501
shard_config[1]
),
)
@@ -384,7 +384,7 @@ class ShardedVariableTest(tf.test.TestCase, parameterized.TestCase):
# Create new strategy with different number of shards
strategy2 = tf.distribute.experimental.ParameterServerStrategy(
multi_worker_testing_utils.make_parameter_server_cluster(3, 2),
-variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner(
+variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501
3
),
)

@@ -33,7 +33,7 @@ strategies_minus_default_minus_tpu = [
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu, # noqa: E501
]
strategies_minus_tpu = [
@@ -42,7 +42,7 @@ strategies_minus_tpu = [
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
-tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu,
+tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu, # noqa: E501
]
multi_worker_mirrored_strategies = [
@@ -56,13 +56,13 @@ tpu_strategies = [
]
parameter_server_strategies_single_worker = [
-tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_cpu,
-tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_1gpu,
+tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.parameter_server_strategy_1worker_2ps_1gpu, # noqa: E501
]
parameter_server_strategies_multi_worker = [
-tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_cpu,
-tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_1gpu,
+tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_cpu, # noqa: E501
+tf.__internal__.distribute.combinations.parameter_server_strategy_3worker_2ps_1gpu, # noqa: E501
]
all_strategies = strategies_minus_tpu + tpu_strategies

@@ -153,7 +153,7 @@ class Optimizer(optimizer_lib._BaseOptimizer):
def _overwrite_model_variables_with_average_value_helper(self, var_list):
"""Helper function to _overwrite_model_variables_with_average_value."""
(
-optimizer_lib._BaseOptimizer._overwrite_model_variables_with_average_value_helper(
+optimizer_lib._BaseOptimizer._overwrite_model_variables_with_average_value_helper( # noqa: E501
self, var_list
)
)

@@ -1498,7 +1498,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
)
)
-with self.distribute_strategy.scope(), training_utils.RespectCompiledTrainableState(
+with self.distribute_strategy.scope(), training_utils.RespectCompiledTrainableState( # noqa: E501
self
):
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
@@ -2377,7 +2377,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
_disallow_inside_tf_function("train_on_batch")
if reset_metrics:
self.reset_metrics()
-with self.distribute_strategy.scope(), training_utils.RespectCompiledTrainableState(
+with self.distribute_strategy.scope(), training_utils.RespectCompiledTrainableState( # noqa: E501
self
):
iterator = data_adapter.single_batch_iterator(

@@ -306,7 +306,7 @@ def model_iteration(
# case.
if not callable(ins) or (
model._distribution_strategy
-and not distributed_training_utils_v1.is_distributing_by_cloning(
+and not distributed_training_utils_v1.is_distributing_by_cloning( # noqa: E501
model
)
):
@@ -353,7 +353,7 @@ def model_iteration(
batch_outs = [batch_outs]
if model._distribution_strategy:
-batch_outs = distributed_training_utils_v1._per_replica_aggregate_batch(
+batch_outs = distributed_training_utils_v1._per_replica_aggregate_batch( # noqa: E501
model._distribution_strategy, batch_outs, model, mode
)

@@ -346,7 +346,7 @@ class TestTrainingWithDataset(test_combinations.TestCase):
)
def test_dataset_input_shape_validation(self):
-with tf.compat.v1.get_default_graph().as_default(), self.cached_session():
+with tf.compat.v1.get_default_graph().as_default(), self.cached_session(): # noqa: E501
model = test_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer="rmsprop", loss="mse")

@@ -45,7 +45,7 @@ class TrainingGPUTest(tf.test.TestCase, parameterized.TestCase):
num_channels = None
activation = None
if loss_name == "sparse_categorical_crossentropy":
-loss = lambda y_true, y_pred: backend.sparse_categorical_crossentropy(
+loss = lambda y_true, y_pred: backend.sparse_categorical_crossentropy( # noqa: E501
y_true, y_pred, axis=axis
)
num_channels = int(np.amax(target) + 1)

@@ -644,12 +644,12 @@ class Model(training_lib.Model):
# Case 1: distribution strategy.
if self._distribution_strategy:
if self._in_multi_worker_mode():
-return training_distributed_v1.DistributionMultiWorkerTrainingLoop(
-training_distributed_v1.DistributionSingleWorkerTrainingLoop()
+return training_distributed_v1.DistributionMultiWorkerTrainingLoop( # noqa: E501
+training_distributed_v1.DistributionSingleWorkerTrainingLoop() # noqa: E501
)
else:
return (
-training_distributed_v1.DistributionSingleWorkerTrainingLoop()
+training_distributed_v1.DistributionSingleWorkerTrainingLoop() # noqa: E501
)
# Case 2: generator-like. Input is Python generator, or Sequence object,

@@ -101,7 +101,7 @@ class SequenceFeatures(kfc._BaseFeaturesLayer):
feature_columns=feature_columns,
trainable=trainable,
name=name,
-expected_column_type=tf.__internal__.feature_column.SequenceDenseColumn,
+expected_column_type=tf.__internal__.feature_column.SequenceDenseColumn, # noqa: E501
**kwargs
)

@@ -926,7 +926,7 @@ class SequenceFeaturesSavingTest(tf.test.TestCase, parameterized.TestCase):
cols = [
tf.feature_column.sequence_numeric_column("a"),
tf.feature_column.indicator_column(
-tf.feature_column.sequence_categorical_column_with_vocabulary_list(
+tf.feature_column.sequence_categorical_column_with_vocabulary_list( # noqa: E501
"b", ["one", "two"]
)
),

@@ -242,7 +242,7 @@ class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase):
try:
mpr_result = tf.__internal__.distribute.multi_process_runner.run(
fn,
-tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
+tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=NUM_WORKERS
),
args=(model_path, checkpoint_dir),

@@ -352,11 +352,12 @@ class TestStatefulLambda(test_combinations.TestCase):
expected_error = textwrap.dedent(
r"""
-( )?The following Variables were created within a Lambda layer \(shift_and_scale\)
-( )?but are not tracked by said layer:
-( )? <tf.Variable \'.*shift_and_scale/scale:0\'.+
-( )? <tf.Variable \'.*shift_and_scale/shift:0\'.+
-( )?The layer cannot safely ensure proper Variable reuse.+"""
+( )?The following Variables were created within a Lambda layer \(shift_and_scale\)""" # noqa: E501
+r"""
+( )?but are not tracked by said layer:
+( )? <tf.Variable \'.*shift_and_scale/scale:0\'.+
+( )? <tf.Variable \'.*shift_and_scale/shift:0\'.+
+( )?The layer cannot safely ensure proper Variable reuse.+"""
)
with self.assertRaisesRegex(ValueError, expected_error):
@@ -374,10 +375,10 @@ class TestStatefulLambda(test_combinations.TestCase):
expected_error = textwrap.dedent(
r"""
-( )?The following Variables were created within a Lambda layer \(bias_dense\)
-( )?but are not tracked by said layer:
-( )? <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
-( )?The layer cannot safely ensure proper Variable reuse.+"""
+( )?The following Variables were created within a Lambda layer \(bias_dense\)
+( )?but are not tracked by said layer:
+( )? <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
+( )?The layer cannot safely ensure proper Variable reuse.+"""
)
with self.assertRaisesRegex(ValueError, expected_error):
@@ -395,10 +396,10 @@ class TestStatefulLambda(test_combinations.TestCase):
expected_warning = textwrap.dedent(
r"""
-( )?The following Variables were used a Lambda layer\'s call \(lambda\), but
-( )?are not present in its tracked objects:
-( )? <tf.Variable \'.*Variable:0\'.+
-( )?It is possible that this is intended behavior.+"""
+( )?The following Variables were used a Lambda layer\'s call \(lambda\), but
+( )?are not present in its tracked objects:
+( )? <tf.Variable \'.*Variable:0\'.+
+( )?It is possible that this is intended behavior.+"""
)
layer = keras.layers.Lambda(lambda_fn)

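A note on the shift_and_scale hunk above: the fix ends the first raw string early and opens a second adjacent literal, relying on Python's compile-time concatenation of adjacent string literals, so the regex passed to textwrap.dedent keeps its original value. A tiny sketch of that mechanism, not taken from this commit:

# Adjacent string literals are concatenated at compile time, so one
# over-long literal can be split across physical lines without
# changing the resulting value.
split_pattern = (
    r"( )?The following Variables were created"
    r" within a Lambda layer"
)
assert split_pattern == r"( )?The following Variables were created within a Lambda layer"
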
@@ -958,7 +958,7 @@ class VariableScopeModule(tf.Module):
`get_variable`&`compat.v1.layers`."""
return {
name: regularizer()
-for name, regularizer in self._tf1_style_var_store._regularizers.items()
+for name, regularizer in self._tf1_style_var_store._regularizers.items() # noqa: E501
} # pylint: disable=protected-access
@@ -1148,7 +1148,7 @@ class TF1VariableScopeLayerTest(tf.test.TestCase, parameterized.TestCase):
"""Dict w/ regularization losses from `get_variable`."""
return {
name: regularizer()
-for name, regularizer in self._variable_store._regularizers.items()
+for name, regularizer in self._variable_store._regularizers.items() # noqa: E501
} # pylint: disable=protected-access
def __call__(self, inputs, training=None):

@@ -455,7 +455,7 @@ class Reduce(Metric):
"""
[
values
-], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
+], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[values], sample_weight
)
try:
@@ -687,7 +687,7 @@ class MeanMetricWrapper(Mean):
[
y_true,
y_pred,
-], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
+], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[y_true, y_pred], sample_weight
)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(

@@ -102,7 +102,7 @@ class KerasSumTest(tf.test.TestCase, parameterized.TestCase):
self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)
def test_sum_graph_with_placeholder(self):
-with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
+with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
m = metrics.Sum()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
@@ -261,7 +261,7 @@ class MeanTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_mean_graph_with_placeholder(self):
-with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
+with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
m = metrics.Mean()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)

@@ -110,7 +110,7 @@ class MeanRelativeError(base_metric.Mean):
[
y_pred,
y_true,
-], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
+], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[y_pred, y_true], sample_weight
)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
@@ -902,8 +902,8 @@ class Precision(base_metric.Metric):
"""
return metrics_utils.update_confusion_matrix_variables(
{
-metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
-metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
+metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
},
y_true,
y_pred,
@@ -1048,8 +1048,8 @@ class Recall(base_metric.Metric):
"""
return metrics_utils.update_confusion_matrix_variables(
{
-metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
-metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
+metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
@@ -1144,10 +1144,10 @@ class SensitivitySpecificityBase(base_metric.Metric, metaclass=abc.ABCMeta):
"""
return metrics_utils.update_confusion_matrix_variables(
{
-metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
-metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
-metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
-metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
+metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
@@ -1918,10 +1918,10 @@ class AUC(base_metric.Metric):
return metrics_utils.update_confusion_matrix_variables(
{
-metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
-metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
-metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
-metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
+metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
+metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,

@@ -709,7 +709,7 @@ class TestOutputLossMetrics(test_combinations.TestCase):
"output_2_loss": [116, 116],
},
losses_utils.ReductionV2.AUTO: sum_over_batch_size_fit_result,
-losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result,
+losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result, # noqa: E501
}
# In the order: 'loss', 'output_1_loss', 'output_2_loss',

@@ -259,7 +259,7 @@ class KerasAccuracyTest(tf.test.TestCase):
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
-with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
+with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
acc_obj = metrics.SparseCategoricalAccuracy(name="my_acc")
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))

@@ -36,7 +36,7 @@ from keras.optimizers.optimizer_v2 import rmsprop
maybe_distribute = tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.default_strategy,
-tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2,
+tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2, # noqa: E501
]
)

@@ -106,7 +106,7 @@ def _maybe_warn_about_scaling(
"LossScaleOptimizer.apply_gradients(). This will likely result in "
"worse model quality, so please call them in the correct places! "
f"For example:{example_code}\nFor more information, see "
-"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
+"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer" # noqa: E501
)
elif not loss_has_been_scaled:
tf_logging.warning(
@@ -116,7 +116,7 @@ def _maybe_warn_about_scaling(
"worse model quality, so please call get_scaled_loss() in the "
f"correct place! For example:{example_code}\nFor more information, "
"see "
-"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
+"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer" # noqa: E501
)
elif not gradients_have_been_unscaled:
tf_logging.warning(
@@ -126,7 +126,7 @@ def _maybe_warn_about_scaling(
"model quality, so please call get_unscaled_gradients() in the "
f"correct place! For example:{example_code}\nFor more information, "
"see "
-"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
+"https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer" # noqa: E501
)
@@ -899,8 +899,8 @@ class LossScaleOptimizer(
loss_scale = generic_utils.deserialize_keras_object(
config.pop("loss_scale"),
module_objects={
-"FixedLossScale": tf.compat.v1.mixed_precision.FixedLossScale,
-"DynamicLossScale": tf.compat.v1.mixed_precision.DynamicLossScale,
+"FixedLossScale": tf.compat.v1.mixed_precision.FixedLossScale, # noqa: E501
+"DynamicLossScale": tf.compat.v1.mixed_precision.DynamicLossScale, # noqa: E501
},
printable_module_name="loss scale",
)

@@ -164,7 +164,7 @@ class MixedPrecisionTest(test_combinations.TestCase):
with self.assertRaisesRegex(
ValueError, "the global Keras dtype Policy has been set"
):
-tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
+tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite( # noqa: E501
gradient_descent_v2.SGD(1.0)
)
# Test no error is thrown when the policy is currently the default.

@@ -240,7 +240,7 @@ class Ftrl(optimizer.Optimizer):
"initial_accumulator_value": self.initial_accumulator_value,
"l1_regularization_strength": self.l1_regularization_strength,
"l2_regularization_strength": self.l2_regularization_strength,
-"l2_shrinkage_regularization_strength": self.l2_shrinkage_regularization_strength,
+"l2_shrinkage_regularization_strength": self.l2_shrinkage_regularization_strength, # noqa: E501
"beta": self.beta,
}
)

@@ -605,20 +605,20 @@ base_optimizer_keyword_args = """name: String. The name to use
average of the weights of the model (as the weight values change after
each training batch), and periodically overwriting the weights with
their moving average.
-ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is
+ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is # noqa: E501
the momentum to use when computing the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: Int or None, defaults to None. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we
-overwrite the model variable by its moving average. If None, the optimizer
+overwrite the model variable by its moving average. If None, the optimizer # noqa: E501
does not overwrite model variables in the middle of training, and you
need to explicitly overwrite the variables at the end of training
-by calling `optimizer.finalize_variable_values()` (which updates the model
+by calling `optimizer.finalize_variable_values()` (which updates the model # noqa: E501
variables in-place). When using the built-in `fit()` training loop, this
happens automatically after the last epoch, and you don't need to do
anything.
-jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA
+jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA # noqa: E501
compilation. If no GPU device is found, this flag will be ignored.
**kwargs: keyword arguments only used for backward compatibility."""
@@ -943,7 +943,7 @@ class Optimizer(_BaseOptimizer):
)
tf.cond(
tf.cast(should_overwrite_model_vars, tf.bool),
-true_fn=lambda: self._overwrite_model_variables_with_average_value(
+true_fn=lambda: self._overwrite_model_variables_with_average_value( # noqa: E501
var_list
),
false_fn=lambda: None,

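The ema_momentum docstring in the hunk above states the moving-average update rule; a small numeric sketch of that formula, with made-up values:

# new_average = ema_momentum * old_average
#               + (1 - ema_momentum) * current_variable_value
ema_momentum = 0.99
old_average = 1.0
current_variable_value = 2.0
new_average = (
    ema_momentum * old_average
    + (1 - ema_momentum) * current_variable_value
)
print(new_average)  # approximately 1.01
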
@@ -300,7 +300,7 @@ class Ftrl(optimizer_v2.OptimizerV2):
"l2_regularization_strength"
),
"beta": self._serialize_hyperparameter("beta"),
-"l2_shrinkage_regularization_strength": self._l2_shrinkage_regularization_strength,
+"l2_shrinkage_regularization_strength": self._l2_shrinkage_regularization_strength, # noqa: E501
}
)
return config

@@ -606,7 +606,8 @@ class OptimizerV2(tf.__internal__.tracking.Trackable):
gradient can be `None`.
Raises:
-TypeError: If `var_list` contains anything else than `Variable` objects.
+TypeError: If `var_list` contains anything else than `Variable`
+objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(joshl): Test that we handle weight decay in a reasonable way.
@@ -713,10 +714,10 @@ class OptimizerV2(tf.__internal__.tracking.Trackable):
and isinstance(
strategy,
(
-tf.compat.v1.distribute.experimental.ParameterServerStrategy,
+tf.compat.v1.distribute.experimental.ParameterServerStrategy, # noqa: E501
tf.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.CentralStorageStrategy,
-tf.compat.v1.distribute.experimental.CentralStorageStrategy,
+tf.compat.v1.distribute.experimental.CentralStorageStrategy, # noqa: E501
),
)
):

@@ -115,7 +115,7 @@ class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):
epsilon,
centered,
) in _TESTPARAMS:
-with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu():
+with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu(): # noqa: E501
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
@@ -504,7 +504,7 @@ class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):
epsilon,
centered,
) in _TESTPARAMS:
-with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu():
+with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu(): # noqa: E501
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)

@@ -130,7 +130,8 @@ def make_global_gradient_clipnorm_fn(clipnorm):
),
):
raise ValueError(
-"`global_clipnorm` is not supported with `CenteralStorageStrategy`. "
+"`global_clipnorm` is not supported with "
+"`CenteralStorageStrategy`. "
f"The strategy used is {tf.distribute.get_strategy()}."
)

@@ -343,7 +343,7 @@ class TestJson(test_combinations.TestCase):
cols = [
tf.feature_column.sequence_numeric_column("a"),
tf.feature_column.indicator_column(
-tf.feature_column.sequence_categorical_column_with_vocabulary_list(
+tf.feature_column.sequence_categorical_column_with_vocabulary_list( # noqa: E501
"b", ["one", "two"]
)
),

@@ -463,7 +463,7 @@ if __name__ == "__main__":
"CustomNetworkWithConfigName": CustomNetworkWithConfigName,
"SubclassedModelWithConfig": SubclassedModelWithConfig,
"FunctionalSubclassModel": FunctionalSubclassModel,
-"FunctionalSubclassModelWrongConfig": FunctionalSubclassModelWrongConfig,
+"FunctionalSubclassModelWrongConfig": FunctionalSubclassModelWrongConfig, # noqa: E501
"WideDeepModel": WideDeepModel,
}
):
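
The mapping in this hunk registers custom classes so deserialization can
resolve them by name. A minimal sketch with a hypothetical subclass (MyDense
is illustrative, not from the test):

    import tensorflow as tf

    class MyDense(tf.keras.layers.Dense):
        pass

    with tf.keras.utils.custom_object_scope({"MyDense": MyDense}):
        layer = tf.keras.layers.deserialize(
            {"class_name": "MyDense", "config": {"units": 4}}
        )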

@ -311,7 +311,7 @@ def _replace_child_layer_functions(layer, serialization_cache):
continue
if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
serialized_functions = child_layer._trackable_saved_model_saver._get_serialized_attributes(
serialized_functions = child_layer._trackable_saved_model_saver._get_serialized_attributes( # noqa: E501
serialization_cache
).functions
else:

@ -1250,7 +1250,7 @@ class TestLayerCallTracing(tf.test.TestCase, parameterized.TestCase):
{(2, 3), (4, 5)},
set(
tuple(c.structured_input_signature[0][0].shape.as_list())
for c in fn2.wrapped_call._list_all_concrete_functions_for_serialization()
for c in fn2.wrapped_call._list_all_concrete_functions_for_serialization() # noqa: E501
),
)
@ -1263,13 +1263,13 @@ class TestLayerCallTracing(tf.test.TestCase, parameterized.TestCase):
with keras_save.tracing_scope():
fn(np.ones((2, 3)), training=True)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(),
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
2,
)
with keras_save.tracing_scope():
fn(np.ones((2, 4)), training=False)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(),
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
4,
)
@ -1277,13 +1277,13 @@ class TestLayerCallTracing(tf.test.TestCase, parameterized.TestCase):
with keras_save.tracing_scope():
fn(np.ones((2, 5)), True)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(),
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
6,
)
with keras_save.tracing_scope():
fn(np.ones((2, 6)))
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(),
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
8,
)
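
These assertions count the concrete functions traced per input signature. A
public-API sketch of the same retracing behavior with a plain tf.function
(trace counts can vary if shape relaxation kicks in):

    import numpy as np
    import tensorflow as tf

    @tf.function
    def double(x, training=True):
        return x * 2

    double(np.ones((2, 3)), training=True)
    double(np.ones((2, 4)), training=False)
    # Each distinct shape/argument combination traces a new concrete function.
    print(double.experimental_get_tracing_count())  # 2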

@ -235,7 +235,7 @@ class AutoOutsideCompilationWithKerasTest(tf.test.TestCase):
# every 2 batches, we should see a total of 5 event logs for each
# summary.
expected_event_counts = {
"sequential/layer_for_histogram_summary/custom_histogram_summary_v2": 5
"sequential/layer_for_histogram_summary/custom_histogram_summary_v2": 5 # noqa: E501
if enable_histograms
else 0,
"sequential/layer_for_image_summary/custom_image_summary_v2": 5,

@ -593,7 +593,7 @@ class GraphSpecificModelSubclassingTests(tf.test.TestCase):
def call(self, x):
return self.bn(self.fc(x))
with tf.compat.v1.get_default_graph().as_default(), self.cached_session():
with tf.compat.v1.get_default_graph().as_default(), self.cached_session(): # noqa: E501
model = TestModel1()
x = tf.ones(shape=[100, 784], dtype="float32")
@ -615,7 +615,7 @@ class GraphSpecificModelSubclassingTests(tf.test.TestCase):
def call(self, x):
return self.bn(self.fc(x))
with tf.compat.v1.get_default_graph().as_default(), self.cached_session():
with tf.compat.v1.get_default_graph().as_default(), self.cached_session(): # noqa: E501
model = TestModel2()
x = tf.ones(shape=[100, 784], dtype="float32")
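
The surrounding test builds these subclassed models inside a v1 graph;
eagerly, the same model is a few lines (a sketch mirroring the fc/bn
structure above):

    import tensorflow as tf

    class TestModel(tf.keras.Model):
        def __init__(self):
            super().__init__()
            self.fc = tf.keras.layers.Dense(10)
            self.bn = tf.keras.layers.BatchNormalization()

        def call(self, x):
            return self.bn(self.fc(x))

    model = TestModel()
    y = model(tf.ones(shape=[100, 784], dtype="float32"))  # shape (100, 10)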

@ -341,7 +341,7 @@ class CheckpointingTests(test_combinations.TestCase):
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(),
optimizer_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
@ -377,7 +377,7 @@ class CheckpointingTests(test_combinations.TestCase):
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(),
optimizer_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
status = root.restore(
tf.train.latest_checkpoint(checkpoint_directory)
@ -410,7 +410,7 @@ class CheckpointingTests(test_combinations.TestCase):
root = tf.compat.v1.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
input_value = tf.constant([[3.0]])
train_op = optimizer.minimize(
@ -464,7 +464,7 @@ class CheckpointingTests(test_combinations.TestCase):
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
manager = tf.train.CheckpointManager(
root, checkpoint_directory, max_to_keep=1
@ -508,7 +508,7 @@ class CheckpointingTests(test_combinations.TestCase):
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_directory
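
A condensed sketch of the Checkpoint/CheckpointManager pattern these hunks
exercise; the model and checkpoint path are illustrative:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer = tf.keras.optimizers.Adam()
    root = tf.train.Checkpoint(optimizer=optimizer, model=model)
    manager = tf.train.CheckpointManager(root, "/tmp/ckpts", max_to_keep=1)

    manager.save()
    # restore() returns a status object that can assert full consumption.
    status = root.restore(manager.latest_checkpoint)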

@ -312,7 +312,7 @@ class AudioDatasetFromDirectoryTest(test_combinations.TestCase):
for seq_len in sequence_lengths:
self.assertIn(seq_len, possible_sequence_lengths)
def test_audio_dataset_from_directory_no_output_sequence_length_same_lengths(
def test_audio_dataset_from_directory_no_output_sequence_length_same_lengths( # noqa: E501
self,
):
# This test case tests `audio_dataset_from_directory` when `ragged` and
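
A hedged sketch of the utility under test; audio_dataset_from_directory is
new in this source tree, and the directory path here is hypothetical (class
subfolders containing WAV files):

    import tensorflow as tf

    ds = tf.keras.utils.audio_dataset_from_directory(
        "path/to/audio",  # hypothetical directory of class subfolders
        batch_size=8,
        output_sequence_length=None,  # keep native clip lengths, as in the test
        ragged=False,
    )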

@ -127,30 +127,30 @@ class LayerUtilsTest(tf.test.TestCase):
reader.close()
check_str = (
'Model: "model_2"\n'
"_________________________________________________________________\n"
" Layer (type) Output Shape Param # \n"
"=================================================================\n"
" input_3 (InputLayer) [(None, None, None, 3)] 0 \n"
" \n"
" model_1 (Functional) (None, None, None, 3) 24 \n"
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n"
"| input_1 (InputLayer) [(None, None, None, 3)] 0 |\n"
"| |\n"
"| model (Functional) (None, None, None, 3) 24 |\n"
"||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n"
"|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\n"
"|| ||\n"
"|| conv2d (Conv2D) (None, None, None, 3) 12 ||\n"
"|| ||\n"
"|| batch_normalization (BatchN (None, None, None, 3) 12 ||\n"
"|| ormalization) ||\n"
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n"
"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n"
"=================================================================\n"
"_________________________________________________________________\n" # noqa: E501
" Layer (type) Output Shape Param # \n" # noqa: E501
"=================================================================\n" # noqa: E501
" input_3 (InputLayer) [(None, None, None, 3)] 0 \n" # noqa: E501
" \n" # noqa: E501
" model_1 (Functional) (None, None, None, 3) 24 \n" # noqa: E501
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n" # noqa: E501
"| input_1 (InputLayer) [(None, None, None, 3)] 0 |\n" # noqa: E501
"| |\n" # noqa: E501
"| model (Functional) (None, None, None, 3) 24 |\n" # noqa: E501
"||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n" # noqa: E501
"|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\n" # noqa: E501
"|| ||\n" # noqa: E501
"|| conv2d (Conv2D) (None, None, None, 3) 12 ||\n" # noqa: E501
"|| ||\n" # noqa: E501
"|| batch_normalization (BatchN (None, None, None, 3) 12 ||\n" # noqa: E501
"|| ormalization) ||\n" # noqa: E501
"|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n" # noqa: E501
"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n" # noqa: E501
"=================================================================\n" # noqa: E501
"Total params: 24\n"
"Trainable params: 18\n"
"Non-trainable params: 6\n"
"_________________________________________________________________\n"
"_________________________________________________________________\n" # noqa: E501
)
fin_str = ""
@ -269,23 +269,23 @@ class LayerUtilsTest(tf.test.TestCase):
reader.close()
check_str = (
"Model: "
'"trainable"\n____________________________________________________________________________\n'
" Layer (type) Output Shape Param # "
'"trainable"\n____________________________________________________________________________\n' # noqa: E501
" Layer (type) Output Shape Param # " # noqa: E501
"Trainable "
"\n============================================================================\n"
" conv (Conv2D) (None, 2, 3, 2) 62 N"
"\n============================================================================\n" # noqa: E501
" conv (Conv2D) (None, 2, 3, 2) 62 N" # noqa: E501
" \n"
" "
"\n flat (Flatten) (None, 12) 0 "
" " # noqa: E501
"\n flat (Flatten) (None, 12) 0 " # noqa: E501
"Y \n"
" "
"\n dense (Dense) (None, 5) 65 "
" " # noqa: E501
"\n dense (Dense) (None, 5) 65 " # noqa: E501
"Y \n"
" "
"\n============================================================================\nTotal"
" " # noqa: E501
"\n============================================================================\nTotal" # noqa: E501
" params: 127\nTrainable params: 65\nNon-trainable params: "
"62\n____________________________________________________________________________\n"
"____________________________________________________________________________\n"
"62\n____________________________________________________________________________\n" # noqa: E501
"____________________________________________________________________________\n" # noqa: E501
)
fin_str = ""
@ -338,35 +338,35 @@ class LayerUtilsTest(tf.test.TestCase):
reader.close()
check_str = (
"Model: "
'"model_2"\n____________________________________________________________________________\n'
" Layer (type) Output Shape Param # "
'"model_2"\n____________________________________________________________________________\n' # noqa: E501
" Layer (type) Output Shape Param # " # noqa: E501
"Trainable "
"\n============================================================================\n"
" input3 (InputLayer) [(None, None, None, 3)] 0 Y"
"\n============================================================================\n" # noqa: E501
" input3 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" \n"
" "
"\n model_1 (Functional) (None, None, None, 3) 24 "
" " # noqa: E501
"\n model_1 (Functional) (None, None, None, 3) 24 " # noqa: E501
"Y "
"\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n|"
" input1 (InputLayer) [(None, None, None, 3)] 0 Y"
"\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n|" # noqa: E501
" input1 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" |\n|"
" "
"|\n| model (Functional) (None, None, None, 3) 24 "
" " # noqa: E501
"|\n| model (Functional) (None, None, None, 3) 24 " # noqa: E501
"Y "
"|\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n||"
" input2 (InputLayer) [(None, None, None, 3)] 0 Y"
"|\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n||" # noqa: E501
" input2 (InputLayer) [(None, None, None, 3)] 0 Y" # noqa: E501
" ||\n||"
" "
"||\n|| conv2d (Conv2D) (None, None, None, 3) 12 "
" " # noqa: E501
"||\n|| conv2d (Conv2D) (None, None, None, 3) 12 " # noqa: E501
"N ||\n||"
" "
"||\n|| batch_normalization (BatchN (None, None, None, 3) 12 "
" " # noqa: E501
"||\n|| batch_normalization (BatchN (None, None, None, 3) 12 " # noqa: E501
"Y ||\n|| ormalization)"
" "
"||\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n============================================================================\nTotal"
"||\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n============================================================================\nTotal" # noqa: E501
" params: 24\nTrainable params: 6\nNon-trainable params: "
"18\n____________________________________________________________________________\n"
"____________________________________________________________________________\n"
"18\n____________________________________________________________________________\n" # noqa: E501
"____________________________________________________________________________\n" # noqa: E501
)
fin_str = ""
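
The expected strings in these tests come from summaries written through a
custom print_fn. A minimal sketch of capturing summary lines the same way;
expand_nested and show_trainable drive the nested boxes and the Trainable
column asserted above, and show_trainable on Model.summary is assumed
available in this era:

    import tensorflow as tf

    inputs = tf.keras.Input((4,))
    model = tf.keras.Model(inputs, tf.keras.layers.Dense(2)(inputs))

    lines = []
    model.summary(
        print_fn=lines.append,  # collect lines instead of printing
        expand_nested=True,
        show_trainable=True,
    )
    fin_str = "".join(f"{line}\n" for line in lines)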

@ -2,4 +2,12 @@
known_first_party = keras
default_section = THIRDPARTY
line_length = 80
profile = black
profile = black
[flake8]
# imported but unused in __init__.py, that's ok.
per-file-ignores=**/__init__.py:F401
ignore=E203,W503
max-line-length=80
# Only check line-too-long and ignore other errors.
select=E501
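
With select=E501, flake8 reports only line-length violations against
max-line-length=80; individual lines opt out with the inline marker used
throughout this change. A minimal sketch (the variable is illustrative):

    # Over 80 characters: flagged as E501 unless suppressed in place.
    GREETING = "a deliberately long literal that runs well past the eighty column limit"  # noqa: E501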