fchollet 2015-06-27 13:42:51 -07:00
commit 0d6575c7f9
4 changed files with 127 additions and 79 deletions

.travis.yml (new file, 20 lines added)

@@ -0,0 +1,20 @@
language: python
# Setup anaconda
before_install:
- wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
- chmod +x miniconda.sh
- ./miniconda.sh -b
- export PATH=/home/travis/miniconda/bin:$PATH
- conda update --yes conda
# The next couple of lines fix a crash with multiprocessing on Travis and are not specific to using Miniconda
- sudo rm -rf /dev/shm
- sudo ln -s /run/shm /dev/shm
python:
- "3.4"
# command to install dependencies
install:
- conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib pandas pytest
# Coverage packages are on my binstar channel
- python setup.py install
# command to run tests
script: py.test
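Note: the rm/ln pair above repoints /dev/shm at /run/shm to work around the multiprocessing crash mentioned in the comment. CPython's multiprocessing allocates its semaphores through POSIX shared memory, which lives under /dev/shm on Linux, so a broken mount there breaks Pool creation. A minimal probe of the same machinery, as a sketch (assumes a Linux host):

import multiprocessing

def square(n):
    return n * n

if __name__ == '__main__':
    # Creating a Pool allocates POSIX semaphores in shared memory
    # (/dev/shm on Linux); this is the step that crashed on Travis
    # before the symlink fix above.
    pool = multiprocessing.Pool(2)
    print(pool.map(square, range(5)))  # [0, 1, 4, 9, 16]
    pool.close()
    pool.join()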

@@ -27,6 +27,9 @@ def hard_sigmoid(x):
    return T.nnet.hard_sigmoid(x)


def linear(x):
    '''
    The function returns the variable that is passed in, so all types work.
    '''
    return x


from .utils.generic_utils import get_from_module

@@ -0,0 +1,104 @@
import math

import keras
import theano
import theano.tensor as T
import numpy


def list_assert_equal(a, b, round_to=7):
    '''
    This will do a pairwise, rounded equality test across two lists of
    numbers.
    '''
    pairs = zip(a, b)
    for i, j in pairs:
        assert round(i, round_to) == round(j, round_to)
def get_standard_values():
    '''
    These are just a set of floats used for testing the activation
    functions, and are useful in multiple tests.
    '''
    return [0, 0.1, 0.5, 0.9, 1.0]
def test_softmax():
    from keras.activations import softmax as s

    # Test using a reference implementation of softmax
    def softmax(values):
        m = max(values)
        values = numpy.array(values)
        e = numpy.exp(values - m)
        dist = list(e / numpy.sum(e))
        return dist

    x = T.vector()
    exp = s(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)
    expected = softmax(test_values)
    print(str(result))
    print(str(expected))
    list_assert_equal(result, expected)
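The reference softmax above subtracts the maximum before exponentiating. This is the standard numerically stable form: the result is mathematically unchanged (the shift cancels in the ratio), but the exponents stay at or below zero, so numpy.exp cannot overflow. A minimal illustration with hypothetical, deliberately large logits:

import numpy

logits = numpy.array([10.0, 1000.0])  # hypothetical values, chosen to overflow
e = numpy.exp(logits)                 # exp(1000.0) overflows to inf
naive = e / numpy.sum(e)              # inf / inf -> [0.0, nan]
shifted = numpy.exp(logits - numpy.max(logits))  # exponents <= 0, no overflow
stable = shifted / numpy.sum(shifted)            # [0.0, 1.0]
print(naive, stable)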
def test_relu():
    '''
    The relu implementation doesn't depend on the value being
    a Theano variable. Testing ints, floats and Theano tensors.
    '''
    from keras.activations import relu as r

    assert r(5) == 5
    assert r(-5) == 0
    assert r(-0.1) == 0
    assert r(0.1) == 0.1

    x = T.vector()
    exp = r(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)
    list_assert_equal(result, test_values)  # because there are no negatives in the test values
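The scalar assertions above can only pass if relu is built from operations that are also defined on plain Python numbers, as the docstring says. One algebraic form with that property is (x + abs(x)) / 2; the sketch below is an assumption used for illustration, not a quote of keras.activations.relu:

def relu_sketch(x):
    # abs() and arithmetic are defined for ints, floats and Theano
    # tensors alike, so one expression covers both kinds of test.
    return (x + abs(x)) / 2

assert relu_sketch(5) == 5
assert relu_sketch(-5) == 0
assert relu_sketch(-0.1) == 0
assert relu_sketch(0.1) == 0.1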
def test_tanh():
    from keras.activations import tanh as t
    test_values = get_standard_values()

    x = T.vector()
    exp = t(x)
    f = theano.function([x], exp)

    result = f(test_values)
    expected = [math.tanh(v) for v in test_values]
    print(result)
    print(expected)
    list_assert_equal(result, expected)
def test_linear():
    '''
    This function does no input validation; it just returns the value
    that was passed in, so all types work.
    '''
    from keras.activations import linear as l
    xs = [1, 5, True, None, 'foo']
    for x in xs:
        assert x == l(x)

@@ -1,79 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
import numpy as np
import unittest
nb_classes = 10
batch_size = 128
nb_epoch = 5
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
test_ids = np.where(y_test == np.array(weighted_class))[0]
def create_model():
    model = Sequential()
    model.add(Dense(784, 50))
    model.add(Activation('relu'))
    model.add(Dense(50, 10))
    model.add(Activation('softmax'))
    return model
def test_weights(model, class_weight=None, sample_weight=None):
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=0, class_weight=class_weight, sample_weight=sample_weight)
    score = model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
    return score
class TestConcatenation(unittest.TestCase):
    def test_loss_weighting(self):
        class_weight = dict([(i, standard_weight) for i in range(nb_classes)])
        class_weight[weighted_class] = high_weight
        sample_weight = np.ones((y_train.shape[0])) * standard_weight
        sample_weight[y_train == weighted_class] = high_weight

        for loss in ['mae', 'mse', 'categorical_crossentropy']:
            print('loss:', loss)
            # no weights: reference point
            model = create_model()
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            standard_score = test_weights(model)
            # test class_weight
            model = create_model()
            model.compile(loss=loss, optimizer='rmsprop')
            score = test_weights(model, class_weight=class_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)
            # test sample_weight
            model = create_model()
            model.compile(loss=loss, optimizer='rmsprop')
            score = test_weights(model, sample_weight=sample_weight)
            print('score:', score, ' vs.', standard_score)
            self.assertTrue(score < standard_score)
if __name__ == '__main__':
    print('Test class_weight and sample_weight')
    unittest.main()
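For reference, the two mechanisms the deleted test compared are interchangeable in this setup: class_weight scales the loss of every sample of a given class, while sample_weight attaches a factor to each sample directly. A minimal sketch of the correspondence, reusing the test's constants (the label vector is hypothetical):

import numpy as np

standard_weight, high_weight, weighted_class = 1, 5, 9
y_train = np.array([0, 9, 3, 9, 1])  # hypothetical labels

class_weight = {i: standard_weight for i in range(10)}
class_weight[weighted_class] = high_weight

# The per-sample vector the test built with np.ones() and fancy indexing:
sample_weight = np.array([class_weight[y] for y in y_train])
print(sample_weight)  # [1 5 1 5 1]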