Reintroduce image_shape and filter_shape in conv2d

Francois Chollet 2015-12-03 22:03:28 -08:00
parent f295ecb302
commit 6c1ce0f6e9
4 changed files with 19 additions and 6 deletions

@@ -51,7 +51,7 @@ Y_test = np_utils.to_categorical(y_test, nb_classes)
 model = Sequential()
 model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
-                        border_mode='same',
+                        border_mode='valid',
                         input_shape=(1, img_rows, img_cols)))
 model.add(Activation('relu'))
 model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
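
For context on the example change above: with an NxN kernel, 'valid' convolution shrinks each spatial dimension by N - 1, while 'same' pads so the output keeps the input size. A minimal sketch of that arithmetic (the helper name conv_output_length is illustrative, not part of this commit):

    def conv_output_length(input_length, filter_size, border_mode, stride=1):
        # 'valid' only keeps positions where the filter fits entirely inside
        # the input; 'same' pads so the output matches the input (stride 1).
        if border_mode == 'valid':
            output_length = input_length - filter_size + 1
        else:  # 'same'
            output_length = input_length
        return (output_length + stride - 1) // stride

    # 28x28 MNIST input with a 3x3 kernel:
    print(conv_output_length(28, 3, 'valid'))  # 26
    print(conv_output_length(28, 3, 'same'))   # 28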

@@ -502,7 +502,8 @@ def dropout(x, level, seed=None):
 # CONVOLUTIONS
-def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
+def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th',
+           image_shape=None, filter_shape=None):
     '''
     Run on cuDNN if available.
     border_mode: string, "same" or "valid".

@@ -515,7 +515,8 @@ def dropout(x, level, seed=None):
 # CONVOLUTIONS
-def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
+def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th',
+           image_shape=None, filter_shape=None):
     '''
     Run on cuDNN if available.
     border_mode: string, "same" or "valid".
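
With the widened signature, callers may pass static shape tuples alongside the symbolic tensors; both arguments default to None, so existing call sites keep working. A rough usage sketch, assuming the Theano backend and the backend module's variable/eval helpers (the concrete shapes are made up for illustration):

    import numpy as np
    from keras import backend as K

    x = K.variable(np.random.random((32, 3, 28, 28)))  # (samples, stack, rows, cols)
    w = K.variable(np.random.random((16, 3, 3, 3)))    # (nb_filter, stack, rows, cols)

    # The shape arguments are optional hints: with concrete tuples the backend
    # can hand Theano's conv op static shapes at graph-construction time.
    y = K.conv2d(x, w, border_mode='valid', dim_ordering='th',
                 image_shape=(32, 3, 28, 28), filter_shape=(16, 3, 3, 3))
    print(K.eval(y).shape)  # (32, 16, 26, 26)
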
@@ -532,6 +533,12 @@ def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
         # TF kernel shape: (rows, cols, input_depth, depth)
         x = x.dimshuffle((0, 3, 1, 2))
         kernel = kernel.dimshuffle((3, 2, 0, 1))
+        if image_shape:
+            image_shape = (image_shape[0], image_shape[3],
+                           image_shape[1], image_shape[2])
+        if filter_shape:
+            filter_shape = (filter_shape[3], filter_shape[2],
+                            filter_shape[0], filter_shape[1])

     if _on_gpu() and dnn.dnn_available():
         if border_mode == 'same':
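
The two added if blocks mirror the dimshuffle calls on plain Python tuples: shape hints supplied in TensorFlow ordering are reindexed into Theano ordering before being passed on. The same reindexing in isolation (plain tuples, illustrative values):

    # TF image ordering:  (samples, rows, cols, input_depth)
    # TH image ordering:  (samples, input_depth, rows, cols)
    image_shape_tf = (32, 28, 28, 3)
    image_shape_th = (image_shape_tf[0], image_shape_tf[3],
                      image_shape_tf[1], image_shape_tf[2])
    assert image_shape_th == (32, 3, 28, 28)

    # TF kernel ordering: (rows, cols, input_depth, depth)
    # TH kernel ordering: (depth, input_depth, rows, cols)
    filter_shape_tf = (3, 3, 3, 16)
    filter_shape_th = (filter_shape_tf[3], filter_shape_tf[2],
                       filter_shape_tf[0], filter_shape_tf[1])
    assert filter_shape_th == (16, 3, 3, 3)
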
@@ -558,7 +565,9 @@ def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
         conv_out = T.nnet.conv.conv2d(x, kernel,
                                       border_mode=th_border_mode,
-                                      subsample=strides)
+                                      subsample=strides,
+                                      image_shape=image_shape,
+                                      filter_shape=filter_shape)
         if border_mode == 'same':
             shift_x = (kernel.shape[2] - 1) // 2
             shift_y = (kernel.shape[3] - 1) // 2
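
The surrounding 'same' branch emulates padding by running a 'full' convolution and then cropping the result back to the input size, which is what the shift_x/shift_y offsets are for. A NumPy sketch of that cropping arithmetic (the crop_same helper is illustrative, not the Theano code itself):

    import numpy as np

    def crop_same(full_out, kernel_rows, kernel_cols, in_rows, in_cols):
        # A full convolution produces (in + kernel - 1) outputs per axis;
        # offsetting by (kernel - 1) // 2 recovers a 'same'-sized window.
        shift_x = (kernel_rows - 1) // 2
        shift_y = (kernel_cols - 1) // 2
        return full_out[:, :, shift_x:shift_x + in_rows,
                        shift_y:shift_y + in_cols]

    full = np.zeros((32, 16, 30, 30))  # full conv of a 28x28 input, 3x3 kernel
    print(crop_same(full, 3, 3, 28, 28).shape)  # (32, 16, 28, 28)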

@@ -93,7 +93,8 @@ class Convolution1D(Layer):
         X = K.expand_dims(X, -1)  # add a dimension of the right
         X = K.permute_dimensions(X, (0, 2, 1, 3))
         conv_out = K.conv2d(X, self.W, strides=self.subsample,
-                            border_mode=self.border_mode, dim_ordering='th')
+                            border_mode=self.border_mode,
+                            dim_ordering='th')

         output = conv_out + K.reshape(self.b, (1, self.nb_filter, 1, 1))
         output = self.activation(output)
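
Convolution1D gets its 2D call for free by treating the sequence as an image of width 1: expand_dims appends a singleton axis and permute_dimensions moves channels into 'th' position. A NumPy sketch of the same reshaping (shapes made up for illustration):

    import numpy as np

    batch, steps, input_dim = 32, 100, 64
    X = np.random.random((batch, steps, input_dim))  # batch of 1D sequences

    X = X[..., np.newaxis]         # ~ K.expand_dims(X, -1)
    X = X.transpose((0, 2, 1, 3))  # ~ K.permute_dimensions(X, (0, 2, 1, 3))
    print(X.shape)  # (32, 64, 100, 1): a 'th'-ordered image of width 1,
                    # so the 2D convolution reduces to a 1D one over steps.
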
@@ -212,7 +213,9 @@ class Convolution2D(Layer):
         X = self.get_input(train)
         conv_out = K.conv2d(X, self.W, strides=self.subsample,
                             border_mode=self.border_mode,
-                            dim_ordering=self.dim_ordering)
+                            dim_ordering=self.dim_ordering,
+                            image_shape=self.input_shape,
+                            filter_shape=self.W_shape)

         output = conv_out + K.reshape(self.b, (1, self.nb_filter, 1, 1))
         output = self.activation(output)
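
At the layer level the new hints come from state Keras already tracks: self.input_shape is the input shape recorded for the layer (in whichever dim_ordering it uses), and self.W_shape is the kernel shape fixed when the layer is built. A rough end-to-end sketch using the 0.x-era API shown in the first hunk (filter count and sizes are illustrative):

    from keras.models import Sequential
    from keras.layers.convolutional import Convolution2D
    from keras.layers.core import Activation

    model = Sequential()
    # 16 filters of size 3x3 on a 1-channel 28x28 input. Fixing input_shape up
    # front gives the layer a concrete shape to forward as image_shape, and
    # its weight shape (built from nb_filter, stack size and kernel size) is
    # forwarded as filter_shape.
    model.add(Convolution2D(16, 3, 3, border_mode='valid',
                            input_shape=(1, 28, 28)))
    model.add(Activation('relu'))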