2015-08-17 11:42:54 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2015-12-09 02:49:14 +00:00
|
|
|
'''An implementation of sequence to sequence learning for performing addition
|
2015-08-17 11:42:54 +00:00
|
|
|
Input: "535+61"
|
|
|
|
Output: "596"
|
|
|
|
Padding is handled by using a repeated sentinel character (space)
|
|
|
|
|
|
|
|
Input may optionally be inverted, shown to increase performance in many tasks in:
|
|
|
|
"Learning to Execute"
|
|
|
|
http://arxiv.org/abs/1410.4615
|
|
|
|
and
|
|
|
|
"Sequence to Sequence Learning with Neural Networks"
|
|
|
|
http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf
|
|
|
|
Theoretically it introduces shorter term dependencies between source and target.
|
|
|
|
|
|
|
|
Two digits inverted:
|
2015-11-29 20:54:45 +00:00
|
|
|
+ One layer LSTM (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
|
2015-08-18 00:57:20 +00:00
|
|
|
|
2015-08-17 11:42:54 +00:00
|
|
|
Three digits inverted:
|
2015-11-29 20:54:45 +00:00
|
|
|
+ One layer LSTM (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
|
2015-08-18 00:57:20 +00:00
|
|
|
|
2015-08-17 11:42:54 +00:00
|
|
|
Four digits inverted:
|
2015-11-29 20:54:45 +00:00
|
|
|
+ One layer LSTM (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
|
2015-08-18 00:57:20 +00:00
|
|
|
|
2015-08-17 11:42:54 +00:00
|
|
|
Five digits inverted:
|
2015-11-29 20:54:45 +00:00
|
|
|
+ One layer LSTM (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
|
2015-12-09 02:49:14 +00:00
|
|
|
'''
|
|
|
|
|
|
|
|
from __future__ import print_function
|
2016-04-03 02:00:26 +00:00
|
|
|
from keras.models import Sequential
|
2016-05-29 21:12:57 +00:00
|
|
|
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent
|
2015-12-09 02:49:14 +00:00
|
|
|
import numpy as np
|
|
|
|
from six.moves import range
|
2015-08-17 11:42:54 +00:00
|
|
|
|
|
|
|
|
|
|
|
class CharacterTable(object):
|
2017-01-31 19:57:28 +00:00
|
|
|
"""Given a set of characters:
|
2015-08-17 11:42:54 +00:00
|
|
|
+ Encode them to a one hot integer representation
|
|
|
|
+ Decode the one hot integer representation to their character output
|
2016-06-06 20:29:25 +00:00
|
|
|
+ Decode a vector of probabilities to their character output
|
2017-01-31 19:57:28 +00:00
|
|
|
"""
|
|
|
|
def __init__(self, chars):
|
|
|
|
"""Initialize character table.
|
2017-01-11 19:39:58 +00:00
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# Arguments
|
|
|
|
chars: Characters that can appear in the input.
|
|
|
|
"""
|
2015-08-17 11:42:54 +00:00
|
|
|
self.chars = sorted(set(chars))
|
|
|
|
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
|
|
|
|
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
|
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
def encode(self, C, num_rows):
|
|
|
|
"""One hot encode given string C.
|
|
|
|
|
|
|
|
# Arguments
|
|
|
|
num_rows: Number of rows in the returned one hot encoding. This is
|
|
|
|
used to keep the # of rows for each data the same.
|
|
|
|
"""
|
2017-02-20 03:24:32 +00:00
|
|
|
x = np.zeros((num_rows, len(self.chars)))
|
2015-08-17 11:42:54 +00:00
|
|
|
for i, c in enumerate(C):
|
2017-02-20 03:24:32 +00:00
|
|
|
x[i, self.char_indices[c]] = 1
|
|
|
|
return x
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2017-02-20 03:24:32 +00:00
|
|
|
def decode(self, x, calc_argmax=True):
|
2015-08-17 11:42:54 +00:00
|
|
|
if calc_argmax:
|
2017-02-20 03:24:32 +00:00
|
|
|
x = x.argmax(axis=-1)
|
|
|
|
return ''.join(self.indices_char[x] for x in x)
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2015-08-18 00:57:20 +00:00
|
|
|
|
|
|
|
class colors:
|
|
|
|
ok = '\033[92m'
|
|
|
|
fail = '\033[91m'
|
|
|
|
close = '\033[0m'
|
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# Parameters for the model and dataset.
|
2015-08-18 00:57:20 +00:00
|
|
|
TRAINING_SIZE = 50000
|
2015-08-17 11:42:54 +00:00
|
|
|
DIGITS = 3
|
|
|
|
INVERT = True
|
2017-01-31 19:57:28 +00:00
|
|
|
|
|
|
|
# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
|
|
|
|
# int is DIGITS.
|
2017-02-20 03:24:32 +00:00
|
|
|
MAxLEN = DIGITS + 1 + DIGITS
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# All the numbers, plus sign and space for padding.
|
2015-08-17 11:42:54 +00:00
|
|
|
chars = '0123456789+ '
|
2017-01-31 19:57:28 +00:00
|
|
|
ctable = CharacterTable(chars)
|
2015-08-17 11:42:54 +00:00
|
|
|
|
|
|
|
questions = []
|
|
|
|
expected = []
|
|
|
|
seen = set()
|
|
|
|
print('Generating data...')
|
2015-08-18 00:57:20 +00:00
|
|
|
while len(questions) < TRAINING_SIZE:
|
2017-01-31 19:57:28 +00:00
|
|
|
f = lambda: int(''.join(np.random.choice(list('0123456789'))
|
|
|
|
for i in range(np.random.randint(1, DIGITS + 1))))
|
2015-08-17 11:42:54 +00:00
|
|
|
a, b = f(), f()
|
|
|
|
# Skip any addition questions we've already seen
|
2017-02-20 03:24:32 +00:00
|
|
|
# Also skip any such that x+Y == Y+x (hence the sorting).
|
2015-08-17 11:42:54 +00:00
|
|
|
key = tuple(sorted((a, b)))
|
|
|
|
if key in seen:
|
|
|
|
continue
|
|
|
|
seen.add(key)
|
2017-02-20 03:24:32 +00:00
|
|
|
# Pad the data with spaces such that it is always MAxLEN.
|
2015-08-17 11:42:54 +00:00
|
|
|
q = '{}+{}'.format(a, b)
|
2017-02-20 03:24:32 +00:00
|
|
|
query = q + ' ' * (MAxLEN - len(q))
|
2015-08-17 11:42:54 +00:00
|
|
|
ans = str(a + b)
|
2017-01-31 19:57:28 +00:00
|
|
|
# Answers can be of maximum size DIGITS + 1.
|
2015-08-17 11:42:54 +00:00
|
|
|
ans += ' ' * (DIGITS + 1 - len(ans))
|
|
|
|
if INVERT:
|
2017-01-31 19:57:28 +00:00
|
|
|
# Reverse the query, e.g., '12+345 ' becomes ' 543+21'. (Note the
|
|
|
|
# space used for padding.)
|
2015-08-17 11:42:54 +00:00
|
|
|
query = query[::-1]
|
|
|
|
questions.append(query)
|
|
|
|
expected.append(ans)
|
|
|
|
print('Total addition questions:', len(questions))
|
|
|
|
|
|
|
|
print('Vectorization...')
|
2017-02-20 03:24:32 +00:00
|
|
|
x = np.zeros((len(questions), MAxLEN, len(chars)), dtype=np.bool)
|
2015-08-17 11:42:54 +00:00
|
|
|
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool)
|
|
|
|
for i, sentence in enumerate(questions):
|
2017-02-20 03:24:32 +00:00
|
|
|
x[i] = ctable.encode(sentence, MAxLEN)
|
2015-08-17 11:42:54 +00:00
|
|
|
for i, sentence in enumerate(expected):
|
2017-01-31 19:57:28 +00:00
|
|
|
y[i] = ctable.encode(sentence, DIGITS + 1)
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2017-02-20 03:24:32 +00:00
|
|
|
# Shuffle (x, y) in unison as the later parts of x will almost all be larger
|
2017-01-31 19:57:28 +00:00
|
|
|
# digits.
|
2015-10-05 01:44:49 +00:00
|
|
|
indices = np.arange(len(y))
|
|
|
|
np.random.shuffle(indices)
|
2017-02-20 03:24:32 +00:00
|
|
|
x = x[indices]
|
2015-10-05 01:44:49 +00:00
|
|
|
y = y[indices]
|
2015-11-29 20:54:45 +00:00
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# Explicitly set apart 10% for validation data that we never train over.
|
2017-02-20 03:24:32 +00:00
|
|
|
split_at = len(x) - len(x) // 10
|
|
|
|
(x_train, x_val) = x[:split_at], x[split_at:]
|
|
|
|
(y_train, y_val) = y[:split_at], y[split_at:]
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
print('Training Data:')
|
2017-02-20 03:24:32 +00:00
|
|
|
print(x_train.shape)
|
2015-10-05 01:44:49 +00:00
|
|
|
print(y_train.shape)
|
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
print('Validation Data:')
|
2017-02-20 03:24:32 +00:00
|
|
|
print(x_val.shape)
|
2017-01-31 19:57:28 +00:00
|
|
|
print(y_val.shape)
|
|
|
|
|
|
|
|
# Try replacing GRU, or SimpleRNN.
|
|
|
|
RNN = recurrent.LSTM
|
|
|
|
HIDDEN_SIZE = 128
|
|
|
|
BATCH_SIZE = 128
|
|
|
|
LAYERS = 1
|
|
|
|
|
2015-08-17 11:42:54 +00:00
|
|
|
print('Build model...')
|
|
|
|
model = Sequential()
|
2017-01-31 19:57:28 +00:00
|
|
|
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE.
|
|
|
|
# Note: In a situation where your input sequences have a variable length,
|
2017-02-15 00:08:30 +00:00
|
|
|
# use input_shape=(None, num_feature).
|
2017-02-20 03:24:32 +00:00
|
|
|
model.add(RNN(HIDDEN_SIZE, input_shape=(MAxLEN, len(chars))))
|
2017-01-31 19:57:28 +00:00
|
|
|
# As the decoder RNN's input, repeatedly provide with the last hidden state of
|
|
|
|
# RNN for each time step. Repeat 'DIGITS + 1' times as that's the maximum
|
|
|
|
# length of output, e.g., when DIGITS=3, max output is 999+999=1998.
|
2015-08-17 11:42:54 +00:00
|
|
|
model.add(RepeatVector(DIGITS + 1))
|
2017-01-31 19:57:28 +00:00
|
|
|
# The decoder RNN could be multiple layers stacked or a single layer.
|
2015-10-31 15:16:48 +00:00
|
|
|
for _ in range(LAYERS):
|
2017-01-31 19:57:28 +00:00
|
|
|
# By setting return_sequences to True, return not only the last output but
|
2017-02-15 00:08:30 +00:00
|
|
|
# all the outputs so far in the form of (num_samples, timesteps,
|
2017-01-31 19:57:28 +00:00
|
|
|
# output_dim). This is necessary as TimeDistributed in the below expects
|
|
|
|
# the first dimension to be the timesteps.
|
2015-10-05 01:44:49 +00:00
|
|
|
model.add(RNN(HIDDEN_SIZE, return_sequences=True))
|
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# Apply a dense layer to the every temporal slice of an input. For each of step
|
|
|
|
# of the output sequence, decide which character should be chosen.
|
2016-05-29 21:12:57 +00:00
|
|
|
model.add(TimeDistributed(Dense(len(chars))))
|
2015-08-17 11:42:54 +00:00
|
|
|
model.add(Activation('softmax'))
|
2016-03-19 16:07:15 +00:00
|
|
|
model.compile(loss='categorical_crossentropy',
|
|
|
|
optimizer='adam',
|
|
|
|
metrics=['accuracy'])
|
2017-01-31 19:57:28 +00:00
|
|
|
model.summary()
|
2015-08-17 11:42:54 +00:00
|
|
|
|
2017-01-31 19:57:28 +00:00
|
|
|
# Train the model each generation and show predictions against the validation
|
|
|
|
# dataset.
|
2015-08-18 00:57:20 +00:00
|
|
|
for iteration in range(1, 200):
|
2015-08-17 11:42:54 +00:00
|
|
|
print()
|
|
|
|
print('-' * 50)
|
|
|
|
print('Iteration', iteration)
|
2017-02-20 03:24:32 +00:00
|
|
|
model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=1,
|
|
|
|
validation_data=(x_val, y_val))
|
2017-01-31 19:57:28 +00:00
|
|
|
# Select 10 samples from the validation set at random so we can visualize
|
|
|
|
# errors.
|
2015-10-31 15:16:48 +00:00
|
|
|
for i in range(10):
|
2017-02-20 03:24:32 +00:00
|
|
|
ind = np.random.randint(0, len(x_val))
|
|
|
|
rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
|
|
|
|
preds = model.predict_classes(rowx, verbose=0)
|
|
|
|
q = ctable.decode(rowx[0])
|
2015-08-17 11:42:54 +00:00
|
|
|
correct = ctable.decode(rowy[0])
|
|
|
|
guess = ctable.decode(preds[0], calc_argmax=False)
|
|
|
|
print('Q', q[::-1] if INVERT else q)
|
|
|
|
print('T', correct)
|
2017-01-31 19:57:28 +00:00
|
|
|
if correct == guess:
|
|
|
|
print(colors.ok + '☑' + colors.close, end=" ")
|
|
|
|
else:
|
|
|
|
print(colors.fail + '☒' + colors.close, end=" ")
|
|
|
|
print(guess)
|
2015-08-17 11:42:54 +00:00
|
|
|
print('---')
|