Reference Style Fix (#4972)

This commit is contained in:
Junwei Pan 2017-01-09 16:11:00 -08:00 committed by François Chollet
parent 5863fc74b1
commit 6b05aebc0c
7 changed files with 14 additions and 14 deletions

@@ -8,7 +8,7 @@ document vector is considered to preserve both the word-level and
 sentence-level structure of the context.
 # References
-- [A Hierarchical Neural Autoencoder for Paragraphs and Documents](https://web.stanford.edu/~jurafsky/pubs/P15-1107.pdf)
+- [A Hierarchical Neural Autoencoder for Paragraphs and Documents](https://arxiv.org/abs/1506.01057)
 Encodes paragraphs and documents with HRNN.
 Results have shown that HRNN outperforms standard
 RNNs and may play some role in more sophisticated generation tasks like

@@ -65,7 +65,7 @@ class PReLU(Layer):
 set `shared_axes=[1, 2]`.
 # References
-- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf)
+- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
 '''
 def __init__(self, init='zero', weights=None, shared_axes=None, **kwargs):
 self.supports_masking = True
@@ -124,7 +124,7 @@ class ELU(Layer):
 alpha: scale for the negative factor.
 # References
-- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/pdf/1511.07289v1.pdf)
+- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
 '''
 def __init__(self, alpha=1.0, **kwargs):
 self.supports_masking = True
@@ -228,7 +228,7 @@ class ThresholdedReLU(Layer):
 theta: float >= 0. Threshold location of activation.
 # References
-- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
+- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
 '''
 def __init__(self, theta=1.0, **kwargs):
 self.supports_masking = True

@@ -583,7 +583,7 @@ class Deconvolution2D(Convolution2D):
 `rows` and `cols` values might have changed due to padding.
 # References
-[1] [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285 "arXiv:1603.07285v1 [stat.ML]")
+[1] [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
 [2] [Transposed convolution arithmetic](http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html#transposed-convolution-arithmetic)
 [3] [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
 '''

@@ -243,7 +243,7 @@ class ConvLSTM2D(ConvRecurrent2D):
 # References
 - [Convolutional LSTM Network: A Machine Learning Approach for
-Precipitation Nowcasting](http://arxiv.org/pdf/1506.04214v1.pdf)
+Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
 The current implementation does not include the feedback loop on the
 cells output
 '''

@@ -116,7 +116,7 @@ class SpatialDropout1D(Dropout):
 Same as input
 # References
-- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
 '''
 def __init__(self, p, **kwargs):
 super(SpatialDropout1D, self).__init__(p, **kwargs)
@@ -154,7 +154,7 @@ class SpatialDropout2D(Dropout):
 Same as input
 # References
-- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
 '''
 def __init__(self, p, dim_ordering='default', **kwargs):
 if dim_ordering == 'default':
@@ -202,7 +202,7 @@ class SpatialDropout3D(Dropout):
 Same as input
 # References
-- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+- [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
 '''
 def __init__(self, p, dim_ordering='default', **kwargs):
 if dim_ordering == 'default':
@@ -875,7 +875,7 @@ class MaxoutDense(Layer):
 2D tensor with shape: `(nb_samples, output_dim)`.
 # References
-- [Maxout Networks](http://arxiv.org/pdf/1302.4389.pdf)
+- [Maxout Networks](http://arxiv.org/abs/1302.4389)
 '''
 def __init__(self, output_dim,
 nb_feature=4,
@@ -1001,7 +1001,7 @@ class Highway(Layer):
 2D tensor with shape: `(nb_samples, input_dim)`.
 # References
-- [Highway Networks](http://arxiv.org/pdf/1505.00387v2.pdf)
+- [Highway Networks](http://arxiv.org/abs/1505.00387v2)
 '''
 def __init__(self,
 init='glorot_uniform',

@@ -59,7 +59,7 @@ class BatchNormalization(Layer):
 Same shape as input.
 # References
-- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](http://jmlr.org/proceedings/papers/v37/ioffe15.pdf)
+- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
 '''
 def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
 weights=None, beta_init='zero', gamma_init='one',

@@ -424,8 +424,8 @@ class GRU(Recurrent):
 dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
 # References
-- [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](http://www.aclweb.org/anthology/W14-4012)
-- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/pdf/1412.3555v1.pdf)
+- [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
+- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
 - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
 '''
 def __init__(self, output_dim,