keras/examples/mnist_cnn.py

from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
'''
Train a simple convnet on the MNIST dataset.
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python mnist_cnn.py
Gets to 99.25% test accuracy after 12 epochs (there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
batch_size = 128
nb_classes = 10
nb_epoch = 12
# shape of the image (SHAPE x SHAPE)
shapex, shapey = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of the pooling area for max pooling (POOL x POOL)
nb_pool = 2
# convolution kernel size (CONV x CONV)
nb_conv = 3
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
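# reshape to Theano's channels-first layout (samples, channels, rows, cols);
# MNIST digits are single-channel 28 x 28 images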
X_train = X_train.reshape(X_train.shape[0], 1, shapex, shapey)
X_test = X_test.reshape(X_test.shape[0], 1, shapex, shapey)
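# convert to float32 and scale pixel values from [0, 255] to [0, 1]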
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
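# e.g. the label 5 becomes the one-hot vector [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
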
model = Sequential()
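# pre-1.0 Keras signature: Convolution2D(nb_filters, input stack size, kernel rows, kernel cols);
# border_mode='full' zero-pads the input, so the 28 x 28 image grows to 30 x 30 here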
model.add(Convolution2D(nb_filters, 1, nb_conv, nb_conv, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
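# 2x2 max pooling halves each spatial dimension (28 -> 14); dropout randomly
# zeroes 25% of the activations during training to reduce overfitting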
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
# after the 'full' conv (28 -> 30), the 'valid' conv (30 -> 28) and 2x2 max
# pooling (28 -> 14), each of the nb_filters feature maps is 14 x 14, so the
# flattened input to the dense layer has
# nb_filters * (shapex // nb_pool) * (shapey // nb_pool) = 32 * 14 * 14 = 6272 units
model.add(Dense(nb_filters * (shapex // nb_pool) * (shapey // nb_pool), 128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128, nb_classes))
model.add(Activation('softmax'))
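# categorical crossentropy matches the softmax output and one-hot targets;
# Adadelta adapts the learning rate per parameter during training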
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
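
# optional follow-up, not part of the original example: save the learned weights
# and spot-check a few predictions (save_weights and predict_classes are assumed
# to be available on Sequential in this Keras version; the filename is arbitrary)
model.save_weights('mnist_cnn_weights.hdf5')
predicted_classes = model.predict_classes(X_test[:10], verbose=0)
print('Predicted labels for the first 10 test digits:', predicted_classes)
print('True labels for the first 10 test digits:', y_test[:10])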