Small ops fixes for Torch unit tests (#316)

* Add PyTorch numpy functionality

* Add dtype conversion

* Partial fix for PyTorch numpy tests

* small logic fix

* Revert numpy_test

* Add tensor conversion to numpy

* Fix some arithmetic tests

* Fix some torch functions for numpy compatibility

* Fix pytorch ops for numpy compatibility, add TODOs

* Fix formatting

* Implement nits and fix dtype standardization

* Add pytest skipif decorator and fix nits

* Fix formatting and rename dtypes map

* Split tests by backend

* Merge space

* Fix dtype issues from new type checking

* Implement torch.full and torch.full_like numpy compatible

* Implements logspace and linspace with tensor support for start and stop

* Replace len of shape with ndim

* Fix formatting

* Implement torch.trace

* Implement eye k diagonal arg

* Implement torch.tri

* Fix formatting issues

* Fix torch.take dimensionality

* Add split functionality

* Revert torch.eye implementation to prevent conflict

* Implement all padding modes

* Adds torch image resizing and torchvision dependency.

* Fix conditional syntax

* Make torchvision import optional

* Partial implementation of torch RNN

* Duplicate torch demo file

* Small ops fixes for torch unit tests

* delete nonfunctional gpu test file

* Revert rnn and formatting fixes

* Revert progbar

* Fix formatting
This commit is contained in:
Neel Kovelamudi 2023-06-12 16:20:31 +00:00 committed by Francois Chollet
parent 586c1d9f7b
commit 83636fc191
7 changed files with 14 additions and 2 deletions

@@ -416,6 +416,7 @@ def mish(x):
     - [Misra, 2019](https://arxiv.org/abs/1908.08681)
     """
+    x = backend.convert_to_tensor(x)
     return Mish.static_call(x)

@@ -58,7 +58,7 @@ def logsumexp(x, axis=None, keepdims=False):
         max_x = torch.max(x)
         return torch.log(torch.sum(torch.exp(x - max_x))) + max_x
-    max_x = torch.max(x, dim=axis, keepdim=True).values
+    max_x = torch.amax(x, dim=axis, keepdim=True)
     result = (
         torch.log(torch.sum(torch.exp(x - max_x), dim=axis, keepdim=True))
         + max_x

@@ -134,6 +134,8 @@ def append(
 def arange(start, stop=None, step=1, dtype=None):
     dtype = to_torch_dtype(dtype)
+    if stop is None:
+        return torch.arange(end=start, dtype=dtype)
     return torch.arange(start, stop, step=step, dtype=dtype)

@@ -1,5 +1,6 @@
 import numpy as np

+from keras_core import backend
 from keras_core import constraints
 from keras_core import testing
@@ -35,12 +36,14 @@ class ConstraintsTest(testing.TestCase):
     def test_unit_norm(self):
         constraint_fn = constraints.UnitNorm()
         output = constraint_fn(get_example_array())
+        output = backend.convert_to_numpy(output)
         l2 = np.sqrt(np.sum(np.square(output), axis=0))
         self.assertAllClose(l2, 1.0)

     def test_min_max_norm(self):
         constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
         output = constraint_fn(get_example_array())
+        output = backend.convert_to_numpy(output)
         l2 = np.sqrt(np.sum(np.square(output), axis=0))
         self.assertFalse(l2[l2 < 0.2])
         self.assertFalse(l2[l2 > 0.5 + 1e-6])

@@ -68,6 +68,7 @@ class NormalizationTest(testing.TestCase, parameterized.TestCase):
         layer.adapt(data)
         self.assertTrue(layer.built)
         output = layer(x)
+        output = backend.convert_to_numpy(output)
         self.assertAllClose(np.var(output, axis=0), 1.0, atol=1e-5)
         self.assertAllClose(np.mean(output, axis=0), 0.0, atol=1e-5)
@@ -84,6 +85,7 @@ class NormalizationTest(testing.TestCase, parameterized.TestCase):
         layer.adapt(data)
         self.assertTrue(layer.built)
         output = layer(x)
+        output = backend.convert_to_numpy(output)
         self.assertAllClose(np.var(output, axis=(0, 3)), 1.0, atol=1e-5)
         self.assertAllClose(np.mean(output, axis=(0, 3)), 0.0, atol=1e-5)

@@ -1,6 +1,7 @@
 import numpy as np
 import tensorflow as tf

+from keras_core import backend
 from keras_core import layers
 from keras_core import testing
@@ -36,6 +37,7 @@ class RandomBrightnessTest(testing.TestCase):
         inputs = np.random.randint(0, 255, size=(224, 224, 3))
         output = layer(inputs)
         diff = output - inputs
+        diff = backend.convert_to_numpy(diff)
         self.assertTrue(np.amin(diff) >= 0)
         self.assertTrue(np.mean(diff) > 0)
@@ -45,6 +47,7 @@ class RandomBrightnessTest(testing.TestCase):
         inputs = np.random.randint(0, 255, size=(224, 224, 3))
         output = layer(inputs)
         diff = output - inputs
+        diff = backend.convert_to_numpy(diff)
         self.assertTrue(np.amax(diff) <= 0)
         self.assertTrue(np.mean(diff) < 0)

@@ -9,7 +9,8 @@ from keras_core.operations import operation
 class OpWithMultipleInputs(operation.Operation):
     def call(self, x, y, z=None):
-        return x + 2 * y + 3 * z
+        # Order of operations issue with torch backend
+        return 3 * z + x + 2 * y

     def compute_output_spec(self, x, y, z=None):
         return keras_tensor.KerasTensor(x.shape, x.dtype)