fix the rest

Haifeng Jin 2022-05-31 09:04:41 +00:00 committed by GitHub
parent 564b8d9287
commit 5cf72f4934
25 changed files with 44 additions and 74 deletions

@@ -16,6 +16,7 @@ from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os
import time
import uuid

@@ -1564,7 +1564,7 @@ class ModelCheckpoint(Callback):
)
self._maybe_remove_file()
except IsADirectoryError as e: # h5py 3.x
except IsADirectoryError: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "

@@ -33,8 +33,8 @@ from absl.testing import parameterized
import keras
from keras.callbacks import BackupAndRestore
from keras.callbacks import Callback
from keras.callbacks import BackupAndRestoreExperimental
from keras.callbacks import Callback
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
@@ -387,7 +387,7 @@ class KerasCallbacksTest(test_combinations.TestCase):
if epoch == 5 or epoch == 12:
raise RuntimeError("Interruption")
log_dir = self.get_temp_dir()
self.get_temp_dir()
# The following asserts that the train counter is fault tolerant.
self.assertEqual(model._train_counter.numpy(), 0)
@@ -462,7 +462,8 @@ class KerasCallbacksTest(test_combinations.TestCase):
)
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
"""A callback to intentionally introduce interruption to
training."""
batch_count = 0

@@ -62,8 +62,8 @@ class WorkerTrainingState:
backend.set_value(
self._ckpt_saved_batch, self.CKPT_SAVED_BATCH_UNUSED_VALUE
)
# _ckpt_saved_epoch and _ckpt_saved_batch gets tracked and is included in
# the checkpoint file when backing up.
# _ckpt_saved_epoch and _ckpt_saved_batch gets tracked and is included
# in the checkpoint file when backing up.
checkpoint = tf.train.Checkpoint(
model=self._model,
ckpt_saved_epoch=self._ckpt_saved_epoch,
@@ -155,8 +155,8 @@ class WorkerTrainingState:
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the (epoch, step) the training is supposed to
continue at. Otherwise, return the `initial_epoch, initial_step` the user
passes in.
continue at. Otherwise, return the `initial_epoch, initial_step` the
user passes in.
"""
initial_step = 0
@@ -165,19 +165,20 @@ class WorkerTrainingState:
if mode == mode_keys.ModeKeys.TRAIN:
if self._save_freq == "epoch":
if epoch >= 0:
# The most recently saved epoch is one epoch prior to the epoch it
# failed at, so return the value of 'self._ckpt_saved_epoch' plus one.
# The most recently saved epoch is one epoch prior to the
# epoch it failed at, so return the value of
# 'self._ckpt_saved_epoch' plus one.
initial_epoch = epoch + 1
else:
if batch >= 0 and epoch >= 0:
# If the checkpoint was last saved at last batch of the epoch, return
# the next epoch number and batch=0
# If the checkpoint was last saved at last batch of the
# epoch, return the next epoch number and batch=0
if batch == steps_per_epoch - 1:
initial_epoch = epoch + 1
initial_step = 0
else:
# If the checkpoint was not last saved at last batch of the epoch,
# return the same epoch and next batch number
# If the checkpoint was not last saved at last batch of
# the epoch, return the same epoch and next batch number
initial_epoch = epoch
initial_step = batch + 1
return (initial_epoch, initial_step)
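
The rewrapped comments in this hunk spell out the resume arithmetic; a minimal, self-contained sketch of that logic follows (hypothetical helper name and defaults, not code from this commit):

# Hypothetical helper mirroring the resume arithmetic described above.
def resume_point(epoch, batch, steps_per_epoch, save_freq="epoch",
                 initial_epoch=0, initial_step=0):
    """Return the (epoch, step) pair training should continue from."""
    if save_freq == "epoch":
        if epoch >= 0:
            # Last checkpoint was written at the end of `epoch`; resume at the next one.
            return epoch + 1, 0
    elif batch >= 0 and epoch >= 0:
        if batch == steps_per_epoch - 1:
            # Saved at the final batch of an epoch: next epoch, batch 0.
            return epoch + 1, 0
        else:
            # Saved mid-epoch: same epoch, next batch.
            return epoch, batch + 1
    # Nothing recovered from a checkpoint: fall back to the caller's values.
    return initial_epoch, initial_step

print(resume_point(epoch=3, batch=-1, steps_per_epoch=100))                # (4, 0)
print(resume_point(epoch=3, batch=99, steps_per_epoch=100, save_freq=10))  # (4, 0)
print(resume_point(epoch=3, batch=41, steps_per_epoch=100, save_freq=10))  # (3, 42)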

@@ -181,9 +181,7 @@ class LazyInitVariable(resource_variable_ops.BaseResourceVariable):
# TODO(scottzhu): This method and create_and_initialize might be removed if
# we decide to just use the tf.Variable to replace this class.
def initialize(self):
with ops.name_scope(
self._name, "Variable", skip_on_eager=False
) as name:
with ops.name_scope(self._name, "Variable", skip_on_eager=False):
with ops.colocate_with(self._handle), ops.name_scope("Initializer"):
if callable(self._initial_value):
initial_value = self._initial_value()

@@ -685,6 +685,7 @@ class Layer(tf.Module, version_utils.LayerVersionSelector):
and dtype.is_floating
):
old_getter = getter
# Wrap variable constructor to return an AutoCastVariable.
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
@@ -3082,9 +3083,8 @@ class Layer(tf.Module, version_utils.LayerVersionSelector):
if (
name == "_self_setattr_tracking"
or not getattr(self, "_self_setattr_tracking", True)
or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)
or hasattr(self.__class__, name)
):
try:
super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(

@@ -1279,10 +1279,9 @@ class Layer(base_layer.Layer):
if (
tf.distribute.has_strategy()
and tf.distribute.in_cross_replica_context()
and
# When saving the model, the distribution strategy context should be
# ignored, following the default path for adding updates.
not call_context.saving
and not call_context.saving
):
# Updates don't need to be run in a cross-replica context.
return
@@ -2330,9 +2329,8 @@ class Layer(base_layer.Layer):
if (
name == "_self_setattr_tracking"
or not getattr(self, "_self_setattr_tracking", True)
or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)
or hasattr(self.__class__, name)
):
try:
super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(

@@ -1237,9 +1237,8 @@ def _should_skip_first_node(layer):
if layer._self_tracked_trackables:
return (
isinstance(layer, Functional)
and
# Filter out Sequential models without an input shape.
isinstance(
and isinstance(
layer._self_tracked_trackables[0], input_layer_module.InputLayer
)
)

@@ -18,4 +18,4 @@
Everything has been moved to keras/saving/. This file will be deleted soon.
"""
from keras.saving import * # noqa: F401
from keras.saving import * # noqa: F401,F403

@@ -333,7 +333,7 @@ class Sequential(functional.Functional):
# Create Functional API connection by calling the
# current layer
layer_output = layer(layer_input)
except: # pylint:disable=bare-except
except: # noqa: E722
# Functional API calls may fail for a number of
# reasons: 1) The layer may be buggy. In this case
# it will be easier for the user to debug if we fail
@@ -367,7 +367,7 @@ class Sequential(functional.Functional):
# not be supporting such layers.
self._init_graph_network(inputs, outputs)
self._graph_initialized = True
except: # pylint:disable=bare-except
except: # noqa: E722
self._use_legacy_deferred_behavior = True
self._inferred_input_shape = new_shape

@@ -1547,7 +1547,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
(
data_handler._initial_epoch,
data_handler._initial_step,
) = self._maybe_load_initial_counters_from_ckpt( # pylint: disable=protected-access
) = self._maybe_load_initial_counters_from_ckpt(
steps_per_epoch_inferred, initial_epoch
)
logs = None
@@ -3523,8 +3523,8 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the (epoch, step) the training is supposed to
continue at. Otherwise, return the `initial_epoch, initial_step` the user
passes in.
continue at. Otherwise, return the `initial_epoch, initial_step` the
user passes in.
"""
initial_step = 0
if self._training_state is not None:

@@ -1723,7 +1723,7 @@ class TrainingTest(test_combinations.TestCase):
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(x, y, epochs=2)
model.fit(x, y, epochs=2)
policy.set_global_policy("float32")
@test_combinations.run_all_keras_modes
@@ -2368,10 +2368,8 @@ class LossWeightingTest(test_combinations.TestCase):
y_train[:batch_size],
class_weight=class_weight,
)
ref_score = model.evaluate(
x_test, y_test, verbose=0
) # pylint: disable=unused-variable
score = model.evaluate( # pylint: disable=unused-variable
ref_score = model.evaluate(x_test, y_test, verbose=0) # noqa: F841
score = model.evaluate( # noqa: F841
x_test[test_ids, :], y_test[test_ids, :], verbose=0
)
# TODO(b/152990697): Fix the class weights test here.

@@ -70,7 +70,7 @@ class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase):
def skip_fetch_failure_exception(self):
try:
yield
except zipfile.BadZipfile as e:
except zipfile.BadZipfile:
# There can be a race when multiple processes are downloading the
# data. Skip the test if that results in loading errors.
self.skipTest(

@@ -898,9 +898,7 @@ class BatchNormalizationBase(Layer):
# Determine a boolean value for `training`: could be True, False, or
# None.
training_value = control_flow_util.constant_value(training)
if (
training_value == False
): # pylint: disable=singleton-comparison,g-explicit-bool-comparison
if training_value == False: # noqa: E712
mean, variance = self.moving_mean, self.moving_variance
else:
if self.adjustment:
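
For context on the `# noqa: E712` kept above: `training_value` may be a plain bool, a NumPy bool, or None, so an equality check is used rather than an identity check. A minimal illustration with an assumed helper name (not library code):

import numpy as np

def pick_statistics(training_value):
    # `is False` would miss np.bool_(False); `== False` matches both,
    # while None (training unknown at graph-build time) falls through.
    if training_value == False:  # noqa: E712
        return "use moving mean/variance"
    return "use batch statistics (or build a conditional when the value is None)"

print(pick_statistics(False))            # moving statistics
print(pick_statistics(np.bool_(False)))  # moving statistics
print(pick_statistics(None))             # batch statistics / conditional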

@@ -209,9 +209,8 @@ class DeterministicRandomTestToolTest(tf.test.TestCase):
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
error_string = "An exception should have been raised before this"
error_raised = "An exception should have been raised before this"
try:
c = tf.random.uniform(shape=(3, 1))
tf.random.uniform(shape=(3, 1))
raise RuntimeError(error_string)
except ValueError as err:

@@ -385,17 +385,8 @@ class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
self.assertEqual(self.evaluate(opt.loss_scale), 8)
# Test Inf gradients are still skipped instead of being clipped
<<<<<<< HEAD
loss = lambda: var * float("Inf")
run_fn = lambda: opt.minimize(loss, var_list=[var])
=======
def run_fn():
def loss():
return var * float("Inf")
return opt.minimize(loss, var_list=[var])
>>>>>>> 0bb24689 (fix F811)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertAllClose(
@@ -426,17 +417,8 @@ class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
self.assertEqual(4.0, self.evaluate(opt.loss_scale))
# Test optimizer with NaN gradients
<<<<<<< HEAD
loss = lambda: var * float("NaN")
run_fn = lambda: opt.minimize(loss, var_list=[var])
=======
def run_fn():
def loss():
return var * float("NaN")
return opt.minimize(loss, var_list=[var])
>>>>>>> 0bb24689 (fix F811)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
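
The two hunks above remove leftover merge-conflict blocks (the markers from the earlier "fix F811" commit plus the lambda variant) and keep the `def` form. A minimal before/after sketch of that lambda-to-def rewrite, with illustrative names:

# Before (the lambda variant; E731 is "do not assign a lambda expression"):
#     compute_loss = lambda: 2.0 * float("inf")
#     run_step = lambda: compute_loss()
#
# After: the same behaviour expressed with nested defs, matching the
# version this commit keeps.
def run_step():
    def compute_loss():
        return 2.0 * float("inf")

    return compute_loss()

print(run_step())  # inf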

@@ -713,9 +713,8 @@ class KerasObjectLoader:
for node_id, (node, _) in self.loaded_nodes.items():
if (
not isinstance(node, base_layer.Layer)
or
# Don't finalize models until all layers have finished loading.
node_id in self.model_layer_dependencies
or node_id in self.model_layer_dependencies
):
continue

@@ -1125,7 +1125,7 @@ class TestSavedModelFormat(tf.test.TestCase):
class Model(keras.models.Model):
def __init__(self):
super().__init__()
self.layer = CustomLayer()
self.layer = CustomLayer() # noqa: F821
@tf.function(input_signature=[tf.TensorSpec([None, 1])])
def call(self, inputs):

@@ -365,7 +365,7 @@ def try_build_compiled_arguments(model):
model.compiled_loss.build(model.outputs)
if not model.compiled_metrics.built:
model.compiled_metrics.build(model.outputs, model.outputs)
except: # pylint: disable=bare-except
except: # noqa: E722
logging.warning(
"Compiled the loaded model, but the compiled metrics have "
"yet to be built. `model.compile_metrics` will be empty "

@@ -18,8 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from keras.saving.utils_v1.export_output import *
from keras.saving.utils_v1.export_output import * # noqa: F403
from keras.saving.utils_v1.export_utils import EXPORT_TAG_MAP
from keras.saving.utils_v1.export_utils import SIGNATURE_KEY_MAP
from keras.saving.utils_v1.export_utils import build_all_signature_defs
@@ -28,5 +27,4 @@ from keras.saving.utils_v1.export_utils import get_export_outputs
from keras.saving.utils_v1.export_utils import get_temp_export_dir
from keras.saving.utils_v1.export_utils import get_timestamped_export_dir
# pylint: enable=wildcard-import
# LINT.ThenChange(//tensorflow/python/saved_model/model_utils/__init__.py)

@@ -55,7 +55,7 @@ class KerasDoctestOutputCheckerTest(parameterized.TestCase):
["text1.0 text", []],
["text 1.0text", []],
["text1.0text", []],
["0x12e4", []], # not 12000
["0x12e4", []], # not 12000
["TensorBoard: http://128.0.0.1:8888", []],
# With a newline
["1.0 text\n 2.0 3.0 text", [1.0, 2.0, 3.0]],

@@ -32,7 +32,7 @@ tf.compat.v1.enable_v2_behavior()
# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # pylint: disable=g-import-not-at-top,g-bad-import-order
import doctest # noqa: E402
FLAGS = flags.FLAGS
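
The `# noqa: E402` added above suppresses flake8's "module level import not at top of file" warning, which the deliberately late doctest import would otherwise trigger. A standalone illustration (file name and statements are made up):

# e402_demo.py -- any executable statement before an import makes later
# imports "not at top of file" (E402), hence the noqa marker above.
import sys

sys.dont_write_bytecode = True  # a statement before the next import...

import doctest  # noqa: E402  -- ...so this import needs a per-line suppression

print(doctest.__name__)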

@@ -298,7 +298,7 @@ def get_file(
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise

@@ -15,6 +15,6 @@
"""Keras model mode constants."""
# isort: off
from tensorflow.python.saved_model.model_utils.mode_keys import ( # noqa: E501
from tensorflow.python.saved_model.model_utils.mode_keys import ( # noqa: F401,E501
KerasModeKeys as ModeKeys,
)

@@ -6,7 +6,5 @@ profile=black
[flake8]
# imported but unused in __init__.py, that's ok.
per-file-ignores=**/__init__.py:F401
ignore=E203,W503
ignore=E203,W503,F632,E266,E731,E712,E741
max-line-length=80
# Only check line-too-long and ignore other errors.
select=E501
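
To close, a sketch of how the two suppression mechanisms touched by this commit fit together: the `ignore=` list above turns a code off project-wide, while the `# noqa:` markers added throughout the diff silence a single line. The file below is illustrative only, not part of the commit:

# lint_demo.py -- assumed example showing the suppression styles used above.
from os.path import *  # noqa: F401,F403  -- per-line: only these codes, only here


def risky():
    raise ValueError("boom")


try:
    risky()
except:  # noqa: E722 -- bare except tolerated on this one line only
    pass

flag = 1 == 2
if flag == False:  # E712 is in the ignore= list above, so no noqa is needed
    print("project-wide ignore covers this comparison")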