From 4e0a920ea106afc36a03158b4f1a8f5a70a8e39c Mon Sep 17 00:00:00 2001
From: james77777778 <20734616+james77777778@users.noreply.github.com>
Date: Wed, 24 Apr 2024 11:53:10 +0800
Subject: [PATCH] Clean up duplicated `inputs_quantizer` (#19604)

* Cleanup duplicated `inputs_quantizer` and add type check for
  `input_spec` and `supports_masking`

* Revert setter
---
 keras/src/layers/core/dense.py        | 2 --
 keras/src/layers/core/einsum_dense.py | 4 ----
 2 files changed, 6 deletions(-)

diff --git a/keras/src/layers/core/dense.py b/keras/src/layers/core/dense.py
index da32e0107..8b78a2851 100644
--- a/keras/src/layers/core/dense.py
+++ b/keras/src/layers/core/dense.py
@@ -557,8 +557,6 @@ class Dense(Layer):
 
         self._tracker.unlock()
         if mode == "int8":
-            # Configure `self.inputs_quantizer`
-            self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1)
             # Quantize `self._kernel` to int8 and compute corresponding scale
             kernel_value, kernel_scale = quantizers.abs_max_quantize(
                 self._kernel, axis=0
diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py
index bfc7eff43..a884171de 100644
--- a/keras/src/layers/core/einsum_dense.py
+++ b/keras/src/layers/core/einsum_dense.py
@@ -684,10 +684,6 @@ class EinsumDense(Layer):
                 self._custom_gradient_equation,
                 self._kernel_reverse_transpose_axes,
             ) = _analyze_quantization_info(self.equation, self.input_spec.ndim)
-            # Configure `self.inputs_quantizer`
-            self.inputs_quantizer = quantizers.AbsMaxQuantizer(
-                axis=self._input_reduced_axes
-            )
             # Quantize `self._kernel` to int8 and compute corresponding scale
             kernel_value, kernel_scale = quantizers.abs_max_quantize(
                 self._kernel, axis=self._kernel_reduced_axes
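
Note on the removed code: the deleted lines configured an abs-max input
quantizer that, per the PR title, duplicated configuration already done
elsewhere in the int8 path (the other site is not shown in this diff).
Abs-max quantization derives a per-axis scale from the largest absolute
value and maps floats onto the int8 range, which is the technique behind
`quantizers.AbsMaxQuantizer` and `quantizers.abs_max_quantize`. Below is a
minimal NumPy sketch of that technique for illustration only; the symmetric
(-127, 127) range, the epsilon guard, and the exact signature are
assumptions, not the Keras source.

    # Illustrative abs-max int8 quantization (not the Keras implementation).
    import numpy as np

    def abs_max_quantize(x, axis, value_range=(-127, 127), epsilon=1e-7):
        # Per-axis scale: the largest magnitude along `axis` maps to 127.
        abs_max = np.max(np.abs(x), axis=axis, keepdims=True)
        scale = value_range[1] / (abs_max + epsilon)
        # Scale, round, and clip into the int8 range, then cast.
        q = np.clip(np.round(x * scale), value_range[0], value_range[1])
        return q.astype("int8"), scale

    x = np.random.uniform(-3.0, 3.0, size=(4, 8)).astype("float32")
    q, scale = abs_max_quantize(x, axis=0)    # per-column scale, like axis=0 above
    x_restored = q.astype("float32") / scale  # approximate dequantization
    print(np.max(np.abs(x - x_restored)))     # small round-trip error

Dividing the int8 values by the returned scale recovers an approximation of
the original tensor, which is why the kernel quantization in the hunks above
keeps a corresponding `kernel_scale` alongside the quantized `kernel_value`.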