keras/examples/imdb_bidirectional_lstm.py
Fariz Rahman f25e894558 Bidirectional Wrapper (#3495)
2016-08-17 16:27:06 -07:00


'''Train a Bidirectional LSTM on the IMDB sentiment classification task.
Output after 4 epochs on CPU: ~0.8146 validation accuracy.
Time per epoch on CPU (Core i7): ~150s.
'''

from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, Bidirectional
from keras.datasets import imdb

max_features = 20000
maxlen = 100  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
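# Bidirectional runs a second copy of the LSTM over the reversed sequence and,
# by default, concatenates the forward and backward outputs (merge_mode='concat').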
model.add(Bidirectional(LSTM(64)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

print('Train...')
model.fit(X_train, y_train,
          batch_size=batch_size,
          nb_epoch=4,
          validation_data=[X_test, y_test])
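
# Optional (not part of the original script): evaluate on the held-out test set
# after training to report a final score.
# score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
# print('Test score:', score)
# print('Test accuracy:', acc)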