keras/examples/imdb_cnn.py

from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.datasets import imdb
'''
This example demonstrates the use of Convolution1D
for text classification.
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_cnn.py
Gets to 0.8330 test accuracy after 3 epochs. 100 s/epoch on a K520 GPU.
'''
# set parameters:
max_features = 5000   # vocabulary size: keep only the top 5000 words
maxlen = 100          # cut reviews off after 100 words
batch_size = 32
embedding_dims = 100  # dimensionality of the word embeddings
nb_filter = 250       # number of convolution filters to learn
filter_length = 3     # width of each filter, in words
hidden_dims = 250     # size of the vanilla hidden layer
nb_epoch = 3
print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
test_split=0.2)
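# Each review is a list of word indices (words ranked by overall frequency
# in the dataset); labels are 0 (negative) or 1 (positive).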
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
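# pad_sequences pads shorter reviews with zeros (at the front, by default)
# and truncates longer ones, so every sample becomes a fixed-length vector
# of maxlen word indices.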
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.25))
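# The output of the embedding (dropout leaves the shape unchanged) is a
# (batch_size, maxlen, embedding_dims) tensor, i.e. (32, 100, 100) here.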

# we add a Convolution1D, which will learn nb_filter
# word group filters of size filter_length:
model.add(Convolution1D(nb_filter=nb_filter,
                        filter_length=filter_length,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
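# With border_mode='valid' and subsample_length=1, the output sequence
# length is maxlen - filter_length + 1 = 100 - 3 + 1 = 98 steps, each with
# nb_filter = 250 feature channels.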
# we use standard max pooling (halving the output of the previous layer):
model.add(MaxPooling1D(pool_length=2))
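# pool_length=2 halves the sequence dimension: 98 -> 49 steps.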
# We flatten the output of the conv layer, so that we can add a vanilla dense layer:
model.add(Flatten())
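# Each sample is now a flat vector of 49 * 250 = 12250 values.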
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.25))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              class_mode='binary')
model.fit(X_train, y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          show_accuracy=True,
          validation_data=(X_test, y_test))
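# A minimal follow-up sketch (not part of the original example): scoring the
# trained model on the held-out test set with the same Keras-0.x-era API,
# where show_accuracy=True makes evaluate() return (loss, accuracy).
score, acc = model.evaluate(X_test, y_test,
                            batch_size=batch_size,
                            show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
# Note: since the test set doubles as validation data in fit() above, this
# simply reproduces the final validation metrics.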