# keras/examples/cifar10_cnn.py
'''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs
(it is still underfitting at that point, though).
Note: the data was pickled with Python 2, and some encoding issues might prevent you
from loading it in Python 3. You might have to load it in Python 2,
save it in a different format, load it in Python 3 and repickle it.
'''
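
# A hedged sketch of the repickling workaround mentioned in the docstring:
# under Python 2, re-save the arrays in a Python-3-friendly format such as
# .npz (the filename below is illustrative), then load that file from
# Python 3 with np.load() instead of calling cifar10.load_data():
#
#   import numpy as np
#   (X_train, y_train), (X_test, y_test) = cifar10.load_data()
#   np.savez('cifar10.npz', X_train=X_train, y_train=y_train,
#            X_test=X_test, y_test=y_test)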
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
from six.moves import range
batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
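# under the Theano dimension ordering used here, X_train comes back with
# shape (50000, 3, 32, 32) and y_train holds integer class labels 0-9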
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
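# e.g. to_categorical turns the integer label 3 into the one-hot row
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], matching the softmax output below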
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=(img_channels, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
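
# shape sketch, a hedged note: with Theano 'th' ordering and the default
# 'valid' border mode on the second conv of each block, the feature maps go
# 32x32 -> 30x30 -> pool -> 15x15 -> 13x13 -> pool -> 6x6, so Flatten()
# hands 64 * 6 * 6 = 2304 features to Dense(512)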
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
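
# a hedged note: with a nonzero decay, Keras of this era shrinks the step as
# roughly lr / (1 + decay * iterations), so the 0.01 learning rate anneals
# slowly over training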
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
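# the two divisions above rescale the uint8 pixel values from 0-255 to [0, 1]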

if not data_augmentation:
    print('Not using data augmentation or normalization')
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print('Test score:', score)
else:
    print('Using real time data augmentation')

    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=True,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=True,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images by up to 20 degrees
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False)  # randomly flip images vertically

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
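    # a hedged note: when standardizing the batches it yields, datagen.flow()
    # reuses the featurewise mean/std computed here from X_train, so the test
    # batches below are normalized with training-set statistics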

    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score[0])])