These changes cut Travis testing time roughly in half (about a 2x speed-up) using a few pytest and Travis configuration options.

Summary of changes:
 - py.test is configured to display profiling information for the 10 slowest tests. This should make further speed-ups easier to spot if anyone has ideas for a particular test. The slowest tests are usually the cifar dataset test and the TensorFlow convolutions; a few other integration tests also look like they could be sped up.
 - py.test is configured to run with pytest-xdist using 2 processes in parallel, because Travis provides multicore support (1.5 cores) and because the slowest cifar test spends most of its time on a download, which can overlap with other tests.
 - Travis is configured with a build matrix that splits the backends into separate jobs, so Theano and TensorFlow are tested in parallel instead of rerunning the whole suite twice inside the Python 2.7 job.
 - Temporary filenames used by the tests (saved weights/pickles) are made unique per test to avoid clashes when tests run in multiple processes; an alternative approach is sketched below.
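
A note on the renamed temp files: instead of hard-coding a distinct name per test, the same clash could also be avoided with per-test temporary paths. The helper below is only a hypothetical sketch and is not part of this commit; unique_weights_path and its usage are illustrative names:

    import os
    import tempfile

    def unique_weights_path(prefix):
        # tempfile.mkstemp returns a path that is unique even across processes,
        # so two pytest-xdist workers can never write to the same weights file.
        fd, path = tempfile.mkstemp(prefix=prefix + '_', suffix='.h5')
        os.close(fd)
        return path

    # hypothetical usage inside a test:
    # path = unique_weights_path('test_merge_sum')
    # model.save_weights(path, overwrite=True)
    # model.load_weights(path)
    # os.remove(path)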
olegsinyavskiy 2015-12-02 22:09:59 -08:00
parent 5956dbe8fa
commit 4781f40eb6
5 changed files with 40 additions and 25 deletions

@@ -1,9 +1,14 @@
 sudo: required
 dist: trusty
 language: python
-python:
-  - "2.7"
-  - "3.4"
+matrix:
+  include:
+    - python: 3.4
+      env: KERAS_BACKEND=theano
+    - python: 2.7
+      env: KERAS_BACKEND=theano
+    - python: 2.7
+      env: KERAS_BACKEND=tensorflow
 install:
   # code below is taken from http://conda.pydata.org/docs/travis.html
   # We do this conditionally because it saves us some downloading if the
@@ -23,7 +28,7 @@ install:
   - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib pandas pytest h5py
   - source activate test-environment
-  - pip install pytest-cov python-coveralls
+  - pip install pytest-cov python-coveralls pytest-xdist
   - pip install git+git://github.com/Theano/Theano.git
   - python setup.py install
@@ -33,10 +38,11 @@ install:
     fi
 # command to run tests
 script:
-  - PYTHONPATH=$PWD:$PYTHONPATH py.test -v --cov-report term-missing --cov keras tests/
-  - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
-      sed -i -e 's/theano/tensorflow/g' ~/.keras/keras.json;
-      PYTHONPATH=$PWD:$PYTHONPATH py.test -v --cov-report term-missing --cov keras tests/;
-    fi
+  # run keras backend init to initialize backend config
+  - python -c "import keras.backend"
+  # set up keras backend
+  - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
+  - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)"
+  - PYTHONPATH=$PWD:$PYTHONPATH py.test tests/
 after_success:
   - coveralls
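
For context, the sed line above only swaps the value of the "backend" key in ~/.keras/keras.json, the backend config file written by the keras.backend init code changed in the next hunk. A rough Python equivalent of the sed + echo steps, given only as an illustrative sketch, would be:

    import json
    import os

    config_path = os.path.expanduser('~/.keras/keras.json')

    # read the config that `python -c "import keras.backend"` has just written
    with open(config_path) as f:
        config = json.load(f)

    # point the backend at whatever the build matrix requested
    config['backend'] = os.environ.get('KERAS_BACKEND', config['backend'])

    with open(config_path, 'w') as f:
        # keep a trailing newline so the `cat` in the echo step displays cleanly
        f.write(json.dumps(config) + '\n')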

@@ -27,7 +27,9 @@ else:
     _config = {'floatx': floatx(),
                'epsilon': epsilon(),
                'backend': _BACKEND}
-    json.dump(_config, open(_config_path, 'w'))
+    with open(_config_path, 'w') as f:
+        # add new line in order for bash 'cat' display the content correctly
+        f.write(json.dumps(_config) + '\n')
 if _BACKEND == 'theano':
     print('Using Theano backend.')
@@ -36,4 +38,4 @@ elif _BACKEND == 'tensorflow':
     print('Using TensorFlow backend.')
     from .tensorflow_backend import *
 else:
-    raise Exception('Unknown backend: ' + str(backend))
+    raise Exception('Unknown backend: ' + str(_BACKEND))

pytest.ini (new file)

@@ -0,0 +1,7 @@
+# Configuration of py.test
+[pytest]
+addopts=-v
+        -n 2
+        --durations=10
+        --cov-report term-missing
+        --cov=keras
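
Because addopts applies to every py.test run, a local run of the suite picks up the same options. A roughly equivalent programmatic invocation (assuming pytest-xdist and pytest-cov are installed) looks like this:

    import pytest

    # mirrors the addopts above: verbose output, 2 xdist workers, report of the
    # 10 slowest tests, and coverage of the keras package
    pytest.main(['-v', '-n', '2', '--durations=10',
                 '--cov-report', 'term-missing', '--cov=keras', 'tests/'])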

@@ -139,7 +139,7 @@ class TestGraph(unittest.TestCase):
        assert(loss < 4.)
        print('test weight saving')
-        graph.save_weights('temp.h5', overwrite=True)
+        graph.save_weights('test_2o_1i_weights_temp.h5', overwrite=True)
        graph = Graph()
        graph.add_input(name='input1', input_shape=(32,))
        graph.add_node(Dense(16), name='dense1', input='input1')
@@ -148,7 +148,7 @@ class TestGraph(unittest.TestCase):
        graph.add_output(name='output1', input='dense2')
        graph.add_output(name='output2', input='dense3')
        graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
-        graph.load_weights('temp.h5')
+        graph.load_weights('test_2o_1i_weights_temp.h5')
        nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        print(nloss)
        assert(loss == nloss)

@@ -61,14 +61,14 @@ class TestSequential(unittest.TestCase):
        model.get_config(verbose=0)
        print('test weight saving')
-        model.save_weights('temp.h5', overwrite=True)
+        model.save_weights('test_sequential_temp.h5', overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
-        model.load_weights('temp.h5')
+        model.load_weights('test_sequential_temp.h5')
        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)
@@ -114,7 +114,7 @@ class TestSequential(unittest.TestCase):
        model.get_config(verbose=0)
        print('test weight saving')
-        model.save_weights('temp.h5', overwrite=True)
+        model.save_weights('test_merge_sum_temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))
@@ -125,7 +125,7 @@ class TestSequential(unittest.TestCase):
        model.add(Merge([left, right], mode='sum'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
-        model.load_weights('temp.h5')
+        model.load_weights('test_merge_sum_temp.h5')
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
@@ -205,7 +205,7 @@ class TestSequential(unittest.TestCase):
        model.get_config(verbose=0)
        print('test weight saving')
-        model.save_weights('temp.h5', overwrite=True)
+        model.save_weights('test_merge_concat_temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))
@@ -221,7 +221,7 @@ class TestSequential(unittest.TestCase):
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
-        model.load_weights('temp.h5')
+        model.load_weights('test_merge_concat_temp.h5')
        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        assert(loss == nloss)
@@ -268,8 +268,8 @@ class TestSequential(unittest.TestCase):
        model.predict_proba([X_test, X_test, X_test], verbose=0)
        model.get_config(verbose=0)
-        model.save_weights('temp.h5', overwrite=True)
-        model.load_weights('temp.h5')
+        model.save_weights('test_merge_recursivity_temp.h5', overwrite=True)
+        model.load_weights('test_merge_recursivity_temp.h5')
        nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print(nloss)
@@ -305,8 +305,8 @@ class TestSequential(unittest.TestCase):
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)
-        model.save_weights('temp.h5', overwrite=True)
-        model.load_weights('temp.h5')
+        model.save_weights('test_merge_overlap_temp.h5', overwrite=True)
+        model.load_weights('test_merge_overlap_temp.h5')
        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
@@ -359,7 +359,7 @@ class TestSequential(unittest.TestCase):
        model.get_config(verbose=0)
        print('test weight saving')
-        model.save_weights('temp.h5', overwrite=True)
+        model.save_weights('test_lambda_temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))
@@ -371,7 +371,7 @@ class TestSequential(unittest.TestCase):
                          output_shape=output_shape))
        model.add(Dense(nb_class))
        model.add(Lambda(activation))
-        model.load_weights('temp.h5')
+        model.load_weights('test_lambda_temp.h5')
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)