From 6b05aebc0ce0717297d65305801e27f62a38cff5 Mon Sep 17 00:00:00 2001
From: Junwei Pan
Date: Mon, 9 Jan 2017 16:11:00 -0800
Subject: [PATCH] Reference Style Fix (#4972)

---
 examples/mnist_hierarchical_rnn.py      |  2 +-
 keras/layers/advanced_activations.py    |  6 +++---
 keras/layers/convolutional.py           |  2 +-
 keras/layers/convolutional_recurrent.py |  2 +-
 keras/layers/core.py                    | 10 +++++-----
 keras/layers/normalization.py           |  2 +-
 keras/layers/recurrent.py               |  4 ++--
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/examples/mnist_hierarchical_rnn.py b/examples/mnist_hierarchical_rnn.py
index 6cb160a5e..7daffbae7 100644
--- a/examples/mnist_hierarchical_rnn.py
+++ b/examples/mnist_hierarchical_rnn.py
@@ -8,7 +8,7 @@ document vector is considered to preserve both
 the word-level and sentence-level structure of the context.
 
 # References
-- [A Hierarchical Neural Autoencoder for Paragraphs and Documents](https://web.stanford.edu/~jurafsky/pubs/P15-1107.pdf)
+- [A Hierarchical Neural Autoencoder for Paragraphs and Documents](https://arxiv.org/abs/1506.01057)
 Encodes paragraphs and documents with HRNN.
 Results have shown that HRNN outperforms standard
 RNNs and may play some role in more sophisticated generation tasks like
diff --git a/keras/layers/advanced_activations.py b/keras/layers/advanced_activations.py
index cf85302cd..12df1af70 100644
--- a/keras/layers/advanced_activations.py
+++ b/keras/layers/advanced_activations.py
@@ -65,7 +65,7 @@ class PReLU(Layer):
             set `shared_axes=[1, 2]`.
 
     # References
-        - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf)
+        - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
     '''
     def __init__(self, init='zero', weights=None, shared_axes=None, **kwargs):
         self.supports_masking = True
@@ -124,7 +124,7 @@ class ELU(Layer):
         alpha: scale for the negative factor.
 
     # References
-        - [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/pdf/1511.07289v1.pdf)
+        - [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
     '''
     def __init__(self, alpha=1.0, **kwargs):
         self.supports_masking = True
@@ -228,7 +228,7 @@ class ThresholdedReLU(Layer):
         theta: float >= 0. Threshold location of activation.
 
     # References
-        - [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
+        - [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
     '''
     def __init__(self, theta=1.0, **kwargs):
         self.supports_masking = True
diff --git a/keras/layers/convolutional.py b/keras/layers/convolutional.py
index be0500ddc..d15578e2c 100644
--- a/keras/layers/convolutional.py
+++ b/keras/layers/convolutional.py
@@ -583,7 +583,7 @@ class Deconvolution2D(Convolution2D):
         `rows` and `cols` values might have changed due to padding.
 
     # References
-        [1] [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285 "arXiv:1603.07285v1 [stat.ML]")
+        [1] [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
         [2] [Transposed convolution arithmetic](http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html#transposed-convolution-arithmetic)
         [3] [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
     '''
diff --git a/keras/layers/convolutional_recurrent.py b/keras/layers/convolutional_recurrent.py
index 1c3eba764..852d7ad17 100644
--- a/keras/layers/convolutional_recurrent.py
+++ b/keras/layers/convolutional_recurrent.py
@@ -243,7 +243,7 @@ class ConvLSTM2D(ConvRecurrent2D):
 
     # References
         - [Convolutional LSTM Network: A Machine Learning Approach for
-          Precipitation Nowcasting](http://arxiv.org/pdf/1506.04214v1.pdf)
+          Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
         The current implementation does not include the feedback loop on the
         cells output
     '''
diff --git a/keras/layers/core.py b/keras/layers/core.py
index 8f9a387af..945093ae5 100644
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -116,7 +116,7 @@ class SpatialDropout1D(Dropout):
         Same as input
 
    # References
-        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
     '''
     def __init__(self, p, **kwargs):
         super(SpatialDropout1D, self).__init__(p, **kwargs)
@@ -154,7 +154,7 @@ class SpatialDropout2D(Dropout):
         Same as input
 
    # References
-        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
     '''
     def __init__(self, p, dim_ordering='default', **kwargs):
         if dim_ordering == 'default':
@@ -202,7 +202,7 @@ class SpatialDropout3D(Dropout):
         Same as input
 
    # References
-        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/pdf/1411.4280.pdf)
+        - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
     '''
     def __init__(self, p, dim_ordering='default', **kwargs):
         if dim_ordering == 'default':
@@ -875,7 +875,7 @@ class MaxoutDense(Layer):
         2D tensor with shape: `(nb_samples, output_dim)`.
 
    # References
-        - [Maxout Networks](http://arxiv.org/pdf/1302.4389.pdf)
+        - [Maxout Networks](http://arxiv.org/abs/1302.4389)
     '''
     def __init__(self, output_dim,
                  nb_feature=4,
@@ -1001,7 +1001,7 @@ class Highway(Layer):
         2D tensor with shape: `(nb_samples, input_dim)`.
 
    # References
-        - [Highway Networks](http://arxiv.org/pdf/1505.00387v2.pdf)
+        - [Highway Networks](http://arxiv.org/abs/1505.00387v2)
     '''
     def __init__(self,
                  init='glorot_uniform',
diff --git a/keras/layers/normalization.py b/keras/layers/normalization.py
index aa1414a35..19db6cde1 100644
--- a/keras/layers/normalization.py
+++ b/keras/layers/normalization.py
@@ -59,7 +59,7 @@ class BatchNormalization(Layer):
         Same shape as input.
 
     # References
-        - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](http://jmlr.org/proceedings/papers/v37/ioffe15.pdf)
+        - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
     '''
     def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
                  weights=None, beta_init='zero', gamma_init='one',
diff --git a/keras/layers/recurrent.py b/keras/layers/recurrent.py
index 7205c395c..31deb03ec 100644
--- a/keras/layers/recurrent.py
+++ b/keras/layers/recurrent.py
@@ -424,8 +424,8 @@ class GRU(Recurrent):
         dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
 
     # References
-        - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](http://www.aclweb.org/anthology/W14-4012)
-        - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/pdf/1412.3555v1.pdf)
+        - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
+        - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
         - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
     '''
     def __init__(self, output_dim,
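
Note for reviewers, not part of the patch: every hunk above makes the same
mechanical substitution (pdf-style arXiv links rewritten to abs-style ones),
so a small stand-alone script can flag any docstrings that were missed or
that regress later. The sketch below is a minimal example of that check,
assuming only the Python standard library; the regex, function name, and
CLI shape are all illustrative, not existing Keras tooling.

    import re
    import sys

    # Matches pdf-style arXiv links such as http://arxiv.org/pdf/1502.01852v1.pdf
    # and captures the arXiv identifier, including an optional version suffix.
    PDF_LINK = re.compile(r'https?://arxiv\.org/pdf/(\d{4}\.\d{4,5}(?:v\d+)?)(?:\.pdf)?')

    def suggest_fixes(path):
        # Yield (line_number, old_url, suggested_url) for one source file.
        with open(path) as f:
            for lineno, line in enumerate(f, start=1):
                for match in PDF_LINK.finditer(line):
                    yield lineno, match.group(0), 'https://arxiv.org/abs/' + match.group(1)

    if __name__ == '__main__':
        # Usage: python check_arxiv_links.py keras/layers/*.py examples/*.py
        for path in sys.argv[1:]:
            for lineno, old, new in suggest_fixes(path):
                print('%s:%d: %s -> %s' % (path, lineno, old, new))

This only covers new-style numeric identifiers; old-style ones such as
cs/9901001 would need a second pattern, and non-arXiv links like the JMLR
and ACL Anthology URLs replaced above still have to be mapped by hand.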