Merge branch 'master' of github.com:fchollet/keras

Francois Chollet 2017-03-21 16:36:18 +01:00
commit 0cc56a46e8
5 changed files with 24 additions and 16 deletions

@@ -1,6 +1,6 @@
 site_name: Keras Documentation
 theme: readthedocs
-theme_dir: theme
+#theme_dir: theme
 docs_dir: sources
 repo_url: http://github.com/fchollet/keras
 site_url: http://keras.io/

@@ -4,18 +4,28 @@ from . import backend as K
 from .utils.generic_utils import deserialize_keras_object
 
 
-def softmax(x):
+def softmax(x, axis=-1):
+    """Softmax activation function.
+
+    # Arguments
+        x : Tensor.
+        axis: Integer, axis along which the softmax normalization is applied.
+
+    # Returns
+        Tensor, output of softmax transformation.
+
+    # Raises
+        ValueError: In case `dim(x) == 1`.
+    """
     ndim = K.ndim(x)
     if ndim == 2:
         return K.softmax(x)
-    elif ndim == 3:
-        e = K.exp(x - K.max(x, axis=-1, keepdims=True))
-        s = K.sum(e, axis=-1, keepdims=True)
+    elif ndim > 2:
+        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
+        s = K.sum(e, axis=axis, keepdims=True)
         return e / s
     else:
-        raise ValueError('Cannot apply softmax to a tensor '
-                         'that is not 2D or 3D. '
-                         'Here, ndim=' + str(ndim))
+        raise ValueError('Cannot apply softmax to a tensor that is 1D')
 
 
 def elu(x, alpha=1.0):
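
A minimal usage sketch of the new `axis` argument (not part of the commit), assuming Keras 2 with the TensorFlow backend: softmax can now normalize tensors with more than three dimensions along a chosen axis, where it previously raised a ValueError.

import numpy as np
from keras import backend as K
from keras.activations import softmax

# Sketch only (assumes the TensorFlow backend).
# 4D input: the old softmax rejected anything that was not 2D or 3D.
x = K.variable(np.random.random((2, 3, 4, 5)))
y = K.eval(softmax(x, axis=-1))   # normalize along the last axis
print(y.shape)                    # (2, 3, 4, 5)
print(y.sum(axis=-1))             # each slice sums to ~1.0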

@@ -1136,8 +1136,7 @@ def any(x, axis=None, keepdims=False):
     """
     axis = _normalize_axis(axis, ndim(x))
     x = tf.cast(x, tf.bool)
-    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
-    return tf.cast(x, tf.uint8)
+    return tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
 
 
 def all(x, axis=None, keepdims=False):
@@ -1153,8 +1152,7 @@ def all(x, axis=None, keepdims=False):
     """
     axis = _normalize_axis(axis, ndim(x))
     x = tf.cast(x, tf.bool)
-    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
-    return tf.cast(x, tf.uint8)
+    return tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
 
 
 def argmax(x, axis=-1):
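
A minimal sketch of the behavioural change (not part of the commit), assuming the TensorFlow backend: with the trailing cast removed, K.any and K.all now return boolean tensors instead of uint8.

import numpy as np
from keras import backend as K

# Sketch only (assumes the TensorFlow backend).
x = K.variable(np.array([[0., 1.], [0., 0.]]))
print(K.dtype(K.any(x, axis=1)))   # 'bool' after this change (previously 'uint8')
print(K.eval(K.any(x, axis=1)))    # [ True False]
print(K.eval(K.all(x, axis=1)))    # [False False]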

@@ -197,8 +197,8 @@ class Concatenate(_Merge):
         for input_i, mask_i in zip(inputs, mask):
             if mask_i is None:
                 # Input is unmasked. Append all 1s to masks,
-                # but cast it to uint8 first
-                masks.append(K.cast(K.ones_like(input_i), 'uint8'))
+                # but cast it to bool first
+                masks.append(K.cast(K.ones_like(input_i), 'bool'))
             elif K.ndim(mask_i) < K.ndim(input_i):
                 # Mask is smaller than the input, expand it
                 masks.append(K.expand_dims(mask_i))

@@ -297,8 +297,8 @@ class Merge(Layer):
         for input_i, mask_i in zip(inputs, mask):
             if mask_i is None:
                 # Input is unmasked. Append all 1s to masks,
-                # but cast it to uint8 first
-                masks.append(K.cast(K.ones_like(input_i), 'uint8'))
+                # but cast it to bool first
+                masks.append(K.cast(K.ones_like(input_i), 'bool'))
             elif K.ndim(mask_i) < K.ndim(input_i):
                 # Mask is smaller than the input, expand it
                 masks.append(K.expand_dims(mask_i))
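
A minimal sketch of the mask dtype change in both hunks above (not part of the commit), assuming the TensorFlow backend: the all-ones placeholder mask built for an unmasked input is now bool, so it matches the boolean masks produced by layers such as Masking or Embedding(mask_zero=True).

import numpy as np
from keras import backend as K

# Sketch only (assumes the TensorFlow backend).
input_i = K.variable(np.zeros((2, 3, 4)))
placeholder_mask = K.cast(K.ones_like(input_i), 'bool')
print(K.dtype(placeholder_mask))        # 'bool' (previously 'uint8')
print(K.eval(placeholder_mask).all())   # True: every entry is treated as unmasked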