keras/examples/imdb_lstm.py


'''Trains an LSTM on the IMDB sentiment classification task.
The dataset is actually too small for an LSTM to offer any advantage
over simpler, much faster methods such as TF-IDF + LogReg.
Notes:
- RNNs are tricky. The choice of batch size is important,
  and the choice of loss and optimizer is critical, etc.
  Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
  from what you see with CNNs/MLPs/etc.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.layers import LSTM, SimpleRNN, GRU
from keras.datasets import imdb
max_features = 20000
maxlen = 80  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
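# Each element of X_train/X_test is a list of word indices (integers),
# one per word in the review, restricted to the top max_features words;
# y_train/y_test hold binary labels (0 = negative, 1 = positive).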
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
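# By default pad_sequences zero-pads and truncates at the start of each
# sequence, so every sample ends up with exactly maxlen timesteps.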
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun
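# A drop-in GRU variant, per the comment above (GRU is already imported);
# uncomment to try it in place of the LSTM layer:
# model.add(GRU(128, dropout_W=0.2, dropout_U=0.2))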
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
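# One possible alternative configuration (not the only reasonable one):
# RMSprop with an explicit learning rate, a common choice for RNNs.
# from keras.optimizers import RMSprop
# model.compile(loss='binary_crossentropy',
#               optimizer=RMSprop(lr=0.001),
#               metrics=['accuracy'])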
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)