From af5c5b6a55528a255500b733439a66d25b5647ec Mon Sep 17 00:00:00 2001
From: jakeleeme
Date: Mon, 6 Jun 2016 13:29:25 -0700
Subject: [PATCH] Spellcheck source files (#2907)

---
 .../getting-started/functional-api-guide.md |  2 +-
 docs/templates/preprocessing/image.md       |  4 ++--
 docs/templates/preprocessing/sequence.md    | 20 +++++++++----------
 docs/templates/scikit-learn-api.md          |  2 +-
 examples/addition_rnn.py                    |  2 +-
 examples/deep_dream.py                      |  2 +-
 examples/neural_style_transfer.py           |  4 ++--
 keras/backend/tensorflow_backend.py         |  4 ++--
 keras/backend/theano_backend.py             |  2 +-
 keras/callbacks.py                          |  4 ++--
 keras/engine/topology.py                    | 12 +++++------
 keras/engine/training.py                    |  6 +++---
 keras/layers/advanced_activations.py        |  2 +-
 keras/layers/convolutional.py               |  6 +++---
 keras/layers/core.py                        | 10 +++++-----
 keras/layers/embeddings.py                  |  2 +-
 keras/layers/noise.py                       |  4 ++--
 keras/layers/normalization.py               |  2 +-
 keras/layers/recurrent.py                   |  2 +-
 keras/legacy/models.py                      |  8 ++++----
 keras/models.py                             |  2 +-
 keras/preprocessing/sequence.py             |  4 ++--
 keras/utils/np_utils.py                     |  2 +-
 keras/wrappers/scikit_learn.py              |  6 +++---
 24 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/docs/templates/getting-started/functional-api-guide.md b/docs/templates/getting-started/functional-api-guide.md
index 43b4b6940..790c7de0c 100644
--- a/docs/templates/getting-started/functional-api-guide.md
+++ b/docs/templates/getting-started/functional-api-guide.md
@@ -166,7 +166,7 @@ Let's consider a dataset of tweets. We want to build a model that can tell wheth
 
 One way to achieve this is to build a model that encodes two tweets into two vectors, concatenates the vectors and adds a logistic regression of top, outputting a probability that the two tweets share the same author. The model would then be trained on positive tweet pairs and negative tweet pairs.
 
-Because the problem is symetric, the mechanism that encodes the first tweet should be reused (weights and all) to encode the second tweet. Here we use a shared LSTM layer to encode the tweets.
+Because the problem is symmetric, the mechanism that encodes the first tweet should be reused (weights and all) to encode the second tweet. Here we use a shared LSTM layer to encode the tweets.
 
 Let's build this with the functional API. We will take as input for a tweet a binary matrix of shape `(140, 256)`, i.e. a sequence of 140 vectors of size 256, where each dimension in the 256-dimensional vector encodes the presence/absence of a character (out of an alphabet of 256 frequent characters).
 
diff --git a/docs/templates/preprocessing/image.md b/docs/templates/preprocessing/image.md
index 6b8f5fadd..84344d237 100644
--- a/docs/templates/preprocessing/image.md
+++ b/docs/templates/preprocessing/image.md
@@ -73,7 +73,7 @@ Generate batches of tensor image data with real-time data augmentation. The data
         and the subdirectories should contain PNG or JPG images. See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d) for more details.
     - __target_size__: tuple of integers, default: `(256, 256)`. The dimensions to which all images found will be resized.
     - __color_mode__: one of "grayscale", "rbg". Default: "rgb". Whether the images will be converted to have 1 or 3 color channels.
-    - __classes__: optional list of class subdirectories (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list of classes will be automatically infered (and the order of the classes, which will map to the label indices, will be alphanumeral).
+    - __classes__: optional list of class subdirectories (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list of classes will be automatically inferred (and the order of the classes, which will map to the label indices, will be alphanumeric).
     - __class_mode__: one of "categorical", "binary", "sparse" or None. Default: "categorical". Determines the type of label arrays that are returned: "categorical" will be 2D one-hot encoded labels, "binary" will be 1D binary labels, "sparse" will be 1D integer labels. If None, no labels are returned (the generator will only yield batches of image data, which is useful to use `model.predict_generator()`, `model.evaluate_generator()`, etc.).
    - __batch_size__: size of the batches of data (default: 32).
    - __shuffle__: whether to shuffle the data (default: True)
@@ -150,4 +150,4 @@ model.fit_generator(
         nb_epoch=50,
         validation_data=validation_generator,
         nb_val_samples=800)
-```
\ No newline at end of file
+```
diff --git a/docs/templates/preprocessing/sequence.md b/docs/templates/preprocessing/sequence.md
index 81e3b2682..cc76abef4 100644
--- a/docs/templates/preprocessing/sequence.md
+++ b/docs/templates/preprocessing/sequence.md
@@ -4,14 +4,14 @@ keras.preprocessing.sequence.pad_sequences(sequences, maxlen=None, dtype='int32')
 ```
 
-Transform a list of `nb_samples sequences` (lists of scalars) into a 2D numpy array of shape `(nb_samples, nb_timesteps)`. `nb_timesteps` is either the `maxlen` argument if provided, or the length of the longest sequence otherwise. Sequences that are shorter than `nb_timesteps` are padded with zeros at the end.
+Transform a list of `nb_samples` sequences (lists of scalars) into a 2D Numpy array of shape `(nb_samples, nb_timesteps)`. `nb_timesteps` is either the `maxlen` argument if provided, or the length of the longest sequence otherwise. Sequences that are shorter than `nb_timesteps` are padded with zeros at the end.
 
-- __Return__: 2D numpy array of shape `(nb_samples, nb_timesteps)`.
+- __Return__: 2D Numpy array of shape `(nb_samples, nb_timesteps)`.
 
 - __Arguments__:
     - __sequences__: List of lists of int or float.
     - __maxlen__: None or int. Maximum sequence length, longer sequences are truncated and shorter sequences are padded with zeros at the end.
-    - __dtype__: datatype of the numpy array returned.
+    - __dtype__: datatype of the Numpy array returned.
     - __padding__: 'pre' or 'post', pad either before or after each sequence.
    - __truncating__: 'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence
     - __value__: float, value to pad the sequences to the desired value.
 
@@ -21,12 +21,12 @@ Transform a list of `nb_samples sequences` (lists of scalars) into a 2D numpy ar
 ## skipgrams
 
 ```python
-keras.preprocessing.sequence.skipgrams(sequence, vocabulary_size, 
-    window_size=4, negative_samples=1., shuffle=True, 
+keras.preprocessing.sequence.skipgrams(sequence, vocabulary_size,
+    window_size=4, negative_samples=1., shuffle=True,
     categorical=False, sampling_table=None)
 ```
 
-Transforms a sequence of word indexes (list of int) into couples of the form: 
+Transforms a sequence of word indexes (list of int) into couples of the form:
 
 - (word, word in the same window), with label 1 (positive samples).
 - (word, random word from the vocabulary), with label 0 (negative samples).
@@ -34,8 +34,8 @@ Transforms a sequence of word indexes (list of int) into couples of the form:
 Read more about Skipgram in this gnomic paper by Mikolov et al.:
 [Efficient Estimation of Word Representations in
 Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)
 
-- __Return__: tuple `(couples, labels)`. 
-    - `couples` is a list of 2-elements lists of int: `[word_index, other_word_index]`. 
+- __Return__: tuple `(couples, labels)`.
+    - `couples` is a list of 2-element lists of int: `[word_index, other_word_index]`.
     - `labels` is a list of 0 and 1, where 1 indicates that `other_word_index` was found in the same window as `word_index`, and 0 indicates that `other_word_index` was random.
     - if categorical is set to True, the labels are categorical, ie. 1 becomes [0,1], and 0 becomes [1, 0].
@@ -46,7 +46,7 @@ Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)
     - __negative_samples__: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc.
     - __shuffle__: boolean. Whether to shuffle the samples.
     - __categorical__: boolean. Whether to make the returned labels categorical.
-    - __sampling_table__: numpy array of shape `(vocabulary_size,)` where `sampling_table[i]` is the probability of sampling the word with index i (assumed to be i-th most common word in the dataset).
+    - __sampling_table__: Numpy array of shape `(vocabulary_size,)` where `sampling_table[i]` is the probability of sampling the word with index i (assumed to be i-th most common word in the dataset).
 
 ---
 
@@ -59,7 +59,7 @@ keras.preprocessing.sequence.make_sampling_table(size, sampling_factor=1e-5)
 
 Used for generating the `sampling_table` argument for `skipgrams`. `sampling_table[i]` is the probability of sampling the word i-th most common word in a dataset (more common words should be sampled less frequently, for balance).
 
-- __Return__: numpy array of shape `(size,)`.
+- __Return__: Numpy array of shape `(size,)`.
 
 - __Arguments__:
     - __size__: size of the vocabulary considered.
diff --git a/docs/templates/scikit-learn-api.md b/docs/templates/scikit-learn-api.md
index cf8ba34a8..cc18bdd69 100644
--- a/docs/templates/scikit-learn-api.md
+++ b/docs/templates/scikit-learn-api.md
@@ -25,7 +25,7 @@ present class will then be treated as the default build_fn.
 
 `sk_params` takes both model parameters and fitting parameters. Legal model
 parameters are the arguments of `build_fn`. Note that like all other
-estimators in scikit-learn, 'build_fn' should provide defalult values for
+estimators in scikit-learn, 'build_fn' should provide default values for
 its arguments, so that you could create the estimator without passing any
 values to `sk_params`.
 
diff --git a/examples/addition_rnn.py b/examples/addition_rnn.py
index 58b6d2074..db6ae0180 100644
--- a/examples/addition_rnn.py
+++ b/examples/addition_rnn.py
@@ -39,7 +39,7 @@ class CharacterTable(object):
     Given a set of characters:
     + Encode them to a one hot integer representation
     + Decode the one hot integer representation to their character output
-    + Decode a vector of probabilties to their character output
+    + Decode a vector of probabilities to their character output
     '''
     def __init__(self, chars, maxlen):
         self.chars = sorted(set(chars))
diff --git a/examples/deep_dream.py b/examples/deep_dream.py
index b30923d9a..770570f6f 100644
--- a/examples/deep_dream.py
+++ b/examples/deep_dream.py
@@ -9,7 +9,7 @@ e.g.:
 ```
 python deep_dream.py img/mypic.jpg results/dream
 ```
 
-It is preferrable to run this script on GPU, for speed.
+It is preferable to run this script on GPU, for speed.
 If running on CPU, prefer the TensorFlow backend (much faster).
 
 Example results: http://i.imgur.com/FX6ROg9.jpg
diff --git a/examples/neural_style_transfer.py b/examples/neural_style_transfer.py
index 30c84f083..2a7a806e8 100644
--- a/examples/neural_style_transfer.py
+++ b/examples/neural_style_transfer.py
@@ -14,7 +14,7 @@ e.g.:
 python neural_style_transfer.py img/tuebingen.jpg img/starry_night.jpg results/my_result
 ```
 
-It is preferrable to run this script on GPU, for speed.
+It is preferable to run this script on GPU, for speed.
 If running on CPU, prefer the TensorFlow backend (much faster).
 
 Example result: https://twitter.com/fchollet/status/686631033085677568
@@ -34,7 +34,7 @@ the pixels of the combination image, giving it visual coherence.
 
 - The style loss is where the deep learning keeps in --that one is defined
 using a deep convolutional neural network. Precisely, it consists in a sum of
-L2 distances betwen the Gram matrices of the representations of
+L2 distances between the Gram matrices of the representations of
 the base image and the style reference image, extracted from
 different layers of a convnet (trained on ImageNet). The general idea
 is to capture color/texture information at different spatial
diff --git a/keras/backend/tensorflow_backend.py b/keras/backend/tensorflow_backend.py
index c37e84267..f2a98a00f 100644
--- a/keras/backend/tensorflow_backend.py
+++ b/keras/backend/tensorflow_backend.py
@@ -328,7 +328,7 @@ def std(x, axis=None, keepdims=False):
 
 
 def mean(x, axis=None, keepdims=False):
-    '''Mean of a tensor, alongside the specificied axis.
+    '''Mean of a tensor, alongside the specified axis.
     '''
     axis = _normalize_axis(axis, ndim(x))
     if x.dtype.base_dtype == tf.bool:
@@ -971,7 +971,7 @@ def dropout(x, level, seed=None):
 
 
 def l2_normalize(x, axis):
-    '''Normalizes a tensor wrt the L2 norm alonside the specified axis.
+    '''Normalizes a tensor wrt the L2 norm alongside the specified axis.
     '''
     if axis < 0:
         axis = axis % len(x.get_shape())
diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
index bdeef4673..8e866426a 100644
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -397,7 +397,7 @@ def temporal_padding(x, padding=1):
     '''Pad the middle dimension of a 3D tensor
     with "padding" zeros left and right.
 
-    Appologies for the inane API, but Theano makes this
+    Apologies for the inane API, but Theano makes this
     really hard.
     '''
     input_shape = x.shape
diff --git a/keras/callbacks.py b/keras/callbacks.py
index e1c6c9f45..28686a852 100644
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -426,11 +426,11 @@ class TensorBoard(Callback):
 
     # Arguments
         log_dir: the path of the directory where to save the log
-            files to be parsed by tensorboard
+            files to be parsed by TensorBoard
         histogram_freq: frequency (in epochs) at which to compute activation
             histograms for the layers of the model. If set to 0,
             histograms won't be computed.
-        write_graph: whether to visualize the graph in tensorboard. The log file can
+        write_graph: whether to visualize the graph in TensorBoard. The log file can
             become quite large when write_graph is set to True.
    '''
diff --git a/keras/engine/topology.py b/keras/engine/topology.py
index 82bed08c2..523960ebd 100644
--- a/keras/engine/topology.py
+++ b/keras/engine/topology.py
@@ -281,7 +281,7 @@ class Layer(object):
         self.outbound_nodes = []
 
         # these properties will be set upon call of self.build(),
-        # which itself will be calld upon self.add_inbound_node if necessary.
+        # which itself will be called upon self.add_inbound_node if necessary.
         self.trainable_weights = []
         self.non_trainable_weights = []
         self.regularizers = []
@@ -512,7 +512,7 @@ class Layer(object):
                 where to connect the current layer.
             tensor_indices: integer or list of integers.
                 The output of the inbound node might be a list/tuple
-                of tensor, and we might only be interested in one sepcific entry.
+                of tensor, and we might only be interested in one specific entry.
                 This index allows you to specify the index of the entry in the output list
                 (if applicable). "None" means that we take all outputs (as a list).
         '''
@@ -1003,7 +1003,7 @@ def Input(shape=None, batch_shape=None,
             Should be unique in a model (do not reuse the same name twice).
             It will be autogenerated if it isn't provided.
         dtype: The data type expected by the input, as a string
-            (`float32`, `flaot64`, `int32`...)
+            (`float32`, `float64`, `int32`...)
 
     # Example usage
 
@@ -1233,8 +1233,8 @@ class Merge(Layer):
     def __call__(self, inputs, mask=None):
         '''We disable successive calls to __call__ for Merge layers.
         Although there is no technical obstacle to
-        making it possible to __call__ a Merge intance many times
-        (it is just a layer), it would make for a rather unelegant API.
+        making it possible to __call__ a Merge instance many times
+        (it is just a layer), it would make for a rather inelegant API.
         '''
         if type(inputs) is not list:
             raise Exception('Merge can only be called on a list of tensors, '
@@ -1272,7 +1272,7 @@ class Merge(Layer):
         return self.call(inputs, mask)
 
     def get_output_shape_for(self, input_shape):
-        assert type(input_shape) is list  # must have mutiple input shape tuples
+        assert type(input_shape) is list  # must have multiple input shape tuples
         # case: callable self._output_shape
         if hasattr(self.mode, '__call__'):
             if hasattr(self._output_shape, '__call__'):
diff --git a/keras/engine/training.py b/keras/engine/training.py
index 122ae8583..451ee2222 100644
--- a/keras/engine/training.py
+++ b/keras/engine/training.py
@@ -825,7 +825,7 @@ class Model(Container):
             verbose: verbosity mode.
 
         # Returns
-            Array of prections (if the model has a single output)
+            Array of predictions (if the model has a single output)
             or list of arrays of predictions
             (if the model has multiple outputs).
         '''
@@ -969,7 +969,7 @@ class Model(Container):
                 at the end of each epoch. The model will not be trained on this data.
                 This could be a tuple (x_val, y_val) or a tuple (val_x, val_y, val_sample_weights).
             shuffle: boolean, whether to shuffle the training data before each epoch.
-            class_weight: optional dictionary mapping classe indices (integers) to
+            class_weight: optional dictionary mapping class indices (integers) to
                 a weight (float) to apply to the model's loss for the samples
                 from this class during training.
                 This can be useful to tell the model to "pay more attention" to
@@ -1144,7 +1144,7 @@ class Model(Container):
                 with shape (samples, sequence_length),
                 to apply a different weight to every timestep of every sample.
                 In this case you should make sure to specify
                 sample_weight_mode="temporal" in compile().
-            class_weight: optional dictionary mapping classe indices (integers) to
+            class_weight: optional dictionary mapping class indices (integers) to
                 a weight (float) to apply to the model's loss for the samples
                 from this class during training.
                 This can be useful to tell the model to "pay more attention" to
diff --git a/keras/layers/advanced_activations.py b/keras/layers/advanced_activations.py
index 8bc0cb264..287456874 100644
--- a/keras/layers/advanced_activations.py
+++ b/keras/layers/advanced_activations.py
@@ -51,7 +51,7 @@ class PReLU(Layer):
 
     # Arguments
         init: initialization function for the weights.
-        weights: initial weights, as a list of a single numpy array.
+        weights: initial weights, as a list of a single Numpy array.
 
     # References
         - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf)
diff --git a/keras/layers/convolutional.py b/keras/layers/convolutional.py
index 8133b7870..0f03601d3 100644
--- a/keras/layers/convolutional.py
+++ b/keras/layers/convolutional.py
@@ -394,7 +394,7 @@ class Convolution3D(Layer):
 
     # Arguments
         nb_filter: Number of convolution filters to use.
-        kernel_dim1: Length of the first dimension in the covolution kernel.
+        kernel_dim1: Length of the first dimension in the convolution kernel.
         kernel_dim2: Length of the second dimension in the convolution kernel.
         kernel_dim3: Length of the third dimension in the convolution kernel.
         init: name of initialization function for the weights of the layer
@@ -407,7 +407,7 @@ class Convolution3D(Layer):
             or alternatively, elementwise Theano function.
             If you don't specify anything, no activation is applied
             (ie. "linear" activation: a(x) = x).
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
         border_mode: 'valid' or 'same'.
         subsample: tuple of length 3. Factor by which to subsample output.
             Also called strides elsewhere.
@@ -1100,7 +1100,7 @@ class UpSampling3D(Layer):
     def __init__(self, size=(2, 2, 2), dim_ordering=K.image_dim_ordering(), **kwargs):
         if K._BACKEND != 'theano':
             raise Exception(self.__class__.__name__ +
-                           ' is currently only working with Theano backend.')
+                            ' is currently only working with Theano backend.')
         self.size = tuple(size)
         assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
         self.dim_ordering = dim_ordering
diff --git a/keras/layers/core.py b/keras/layers/core.py
index 766b8ad22..0846d0880 100644
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -161,7 +161,7 @@ class Reshape(Layer):
        '''Find and replace a single missing dimension in an output shape
        given an input shape.
 
-        A near direct port of the internal numpy function _fix_unknown_dimension
+        A near direct port of the internal Numpy function _fix_unknown_dimension
         in numpy/core/src/multiarray/shape.c
 
         # Arguments
@@ -537,7 +537,7 @@ class Dense(Layer):
             or alternatively, elementwise Theano function.
             If you don't specify anything, no activation is applied
             (ie. "linear" activation: a(x) = x).
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 2 elements, of shape `(input_dim, output_dim)`
             and (output_dim,) for weights and biases respectively.
         W_regularizer: instance of [WeightRegularizer](../regularizers.md)
@@ -702,7 +702,7 @@ class MaxoutDense(Layer):
             or alternatively, Theano function to use for weights initialization.
             This parameter is only relevant if you don't pass a `weights` argument.
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 2 elements, of shape `(input_dim, output_dim)`
             and (output_dim,) for weights and biases respectively.
         W_regularizer: instance of [WeightRegularizer](../regularizers.md)
@@ -834,7 +834,7 @@ class Highway(Layer):
             or alternatively, elementwise Theano function.
             If you don't specify anything, no activation is applied
             (ie. "linear" activation: a(x) = x).
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 2 elements, of shape `(input_dim, output_dim)`
             and (output_dim,) for weights and biases respectively.
         W_regularizer: instance of [WeightRegularizer](../regularizers.md)
@@ -983,7 +983,7 @@ class TimeDistributedDense(Layer):
             or alternatively, elementwise Theano function.
             If you don't specify anything, no activation is applied
             (ie. "linear" activation: a(x) = x).
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 2 elements, of shape `(input_dim, output_dim)`
             and (output_dim,) for weights and biases respectively.
         W_regularizer: instance of [WeightRegularizer](../regularizers.md)
diff --git a/keras/layers/embeddings.py b/keras/layers/embeddings.py
index 1f056469c..af69d2164 100644
--- a/keras/layers/embeddings.py
+++ b/keras/layers/embeddings.py
@@ -35,7 +35,7 @@ class Embedding(Layer):
         of the layer (see: [initializations](../initializations.md)),
         or alternatively, Theano function to use for weights initialization.
         This parameter is only relevant if you don't pass a `weights` argument.
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 1 element, of shape `(input_dim, output_dim)`.
         W_regularizer: instance of the [regularizers](../regularizers.md) module
             (eg. L1 or L2 regularization), applied to the embedding matrix.
diff --git a/keras/layers/noise.py b/keras/layers/noise.py
index 2df104848..222c1e102 100644
--- a/keras/layers/noise.py
+++ b/keras/layers/noise.py
@@ -4,7 +4,7 @@ from .. import backend as K
 
 
 class GaussianNoise(Layer):
-    '''Apply to the input an additive zero-centred gaussian noise with
+    '''Apply to the input an additive zero-centered Gaussian noise with
     standard deviation `sigma`. This is useful to mitigate overfitting
     (you could see it as a kind of random data augmentation).
     Gaussian Noise (GS) is a natural choice as corruption process
@@ -42,7 +42,7 @@ class GaussianNoise(Layer):
 
 
 class GaussianDropout(Layer):
-    '''Apply to the input an multiplicative one-centred gaussian noise
+    '''Apply to the input a multiplicative one-centered Gaussian noise
     with standard deviation `sqrt(p/(1-p))`.
 
     As it is a regularization layer, it is only active at training time.
diff --git a/keras/layers/normalization.py b/keras/layers/normalization.py
index 80a080bdc..0a261dff5 100644
--- a/keras/layers/normalization.py
+++ b/keras/layers/normalization.py
@@ -33,7 +33,7 @@ class BatchNormalization(Layer):
             exponential average of the mean and
             standard deviation of the data, for feature-wise normalization.
         weights: Initialization weights.
-            List of 2 numpy arrays, with shapes:
+            List of 2 Numpy arrays, with shapes:
             `[(input_shape,), (input_shape,)]`
         beta_init: name of initialization function for shift parameter
             (see [initializations](../initializations.md)), or alternatively,
diff --git a/keras/layers/recurrent.py b/keras/layers/recurrent.py
index 0fb2b0a1f..71604e35b 100644
--- a/keras/layers/recurrent.py
+++ b/keras/layers/recurrent.py
@@ -66,7 +66,7 @@ class Recurrent(Layer):
     ```
 
     # Arguments
-        weights: list of numpy arrays to set as initial weights.
+        weights: list of Numpy arrays to set as initial weights.
             The list should have 3 elements, of shapes:
             `[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
         return_sequences: Boolean. Whether to return the last output
diff --git a/keras/legacy/models.py b/keras/legacy/models.py
index 093b3e99b..ef61387e3 100644
--- a/keras/legacy/models.py
+++ b/keras/legacy/models.py
@@ -384,7 +384,7 @@ class Graph(Model):
 
         # Arguments
             data: dictionary mapping input names and outputs names to
-                appropriate numpy arrays. All arrays should contain
+                appropriate Numpy arrays. All arrays should contain
                 the same number of samples.
             batch_size: int. Number of samples per gradient update.
             nb_epoch: int.
@@ -395,7 +395,7 @@ class Graph(Model):
             validation_split: float (0. < x < 1).
                 Fraction of the data to use as held-out validation data.
             validation_data: dictionary mapping input names and outputs names
-                to appropriate numpy arrays to be used as
+                to appropriate Numpy arrays to be used as
                 held-out validation data.
                 All arrays should contain the same number of samples.
                 Will override validation_split.
@@ -560,7 +560,7 @@ class Graph(Model):
             verbose: verbosity mode, 0, 1, or 2.
             callbacks: list of callbacks to be called during training.
             validation_data: dictionary mapping input names and outputs names
-                to appropriate numpy arrays to be used as
+                to appropriate Numpy arrays to be used as
                 held-out validation data, or a generator yielding such
                 dictionaries. All arrays should contain the same number of
                 samples. If a generator, will be called until more than
@@ -582,7 +582,7 @@ class Graph(Model):
                 while 1:
                     f = open(path)
                     for line in f:
-                        # create numpy arrays of input data
+                        # create Numpy arrays of input data
                         # and labels, from each line in the file
                         x1, x2, y = process_line(line)
                         yield ({'input_1': x1, 'input_2': x2, 'output': y})
diff --git a/keras/models.py b/keras/models.py
index 4cd02dca1..65808d531 100644
--- a/keras/models.py
+++ b/keras/models.py
@@ -615,7 +615,7 @@ class Sequential(Model):
                 while 1:
                     f = open(path)
                     for line in f:
-                        # create numpy arrays of input data
+                        # create Numpy arrays of input data
                         # and labels, from each line in the file
                         x, y = process_line(line)
                         yield (x, y)
diff --git a/keras/preprocessing/sequence.py b/keras/preprocessing/sequence.py
index 8884b5528..bf1981e66 100644
--- a/keras/preprocessing/sequence.py
+++ b/keras/preprocessing/sequence.py
@@ -100,7 +100,7 @@ def skipgrams(sequence, vocabulary_size,
     '''Take a sequence (list of indexes of words),
     returns couples of [word_index, other_word index] and labels (1s or 0s),
     where label = 1 if 'other_word' belongs to the context of 'word',
-    and label=0 if 'other_word' is ramdomly sampled
+    and label=0 if 'other_word' is randomly sampled
 
     # Arguments
         vocabulary_size: int. maximum possible word index + 1
@@ -113,7 +113,7 @@ def skipgrams(sequence, vocabulary_size,
         categorical: bool.
             if True labels will be categorical eg. [[1,0],[0,1],[0,1] ..
            ]
 
     # Returns
-        couples, lables: where `couples` are int pairs and
+        couples, labels: where `couples` are int pairs and
         `labels` are either 0 or 1.
 
     # Notes
diff --git a/keras/utils/np_utils.py b/keras/utils/np_utils.py
index f72e9115c..c9459d59e 100644
--- a/keras/utils/np_utils.py
+++ b/keras/utils/np_utils.py
@@ -53,7 +53,7 @@ def categorical_probas_to_classes(p):
 
 
 def convert_kernel(kernel, dim_ordering='th'):
-    '''Converts a kernel matrix (numpy array)
+    '''Converts a kernel matrix (Numpy array)
     from Theano format to TensorFlow format
     (or reciprocally, since the transformation
     is its own inverse).
diff --git a/keras/wrappers/scikit_learn.py b/keras/wrappers/scikit_learn.py
index 353307a46..a0a4a9954 100644
--- a/keras/wrappers/scikit_learn.py
+++ b/keras/wrappers/scikit_learn.py
@@ -28,7 +28,7 @@ class BaseWrapper(object):
 
     `sk_params` takes both model parameters and fitting parameters. Legal model
     parameters are the arguments of `build_fn`. Note that like all other
-    estimators in scikit-learn, 'build_fn' should provide defalult values for
+    estimators in scikit-learn, 'build_fn' should provide default values for
     its arguments, so that you could create the estimator without passing any
     values to `sk_params`.
 
@@ -153,10 +153,10 @@ class BaseWrapper(object):
 
         # Arguments
             fn : arbitrary function
-            override: dictionary, values to overrid sk_params
+            override: dictionary, values to override sk_params
 
         # Returns
-            res : dictionary dictionary containing variabls
+            res : dictionary containing variables
                 in both sk_params and fn's arguments.
         '''
         res = {}