From 15c00f8948534b8c2a3a59113872c0206e06dd2f Mon Sep 17 00:00:00 2001 From: gaikwadrahul8 <115997457+gaikwadrahul8@users.noreply.github.com> Date: Fri, 11 Oct 2024 03:33:13 +0530 Subject: [PATCH] Fix typos in the documentation strings of the tfjs-layers directory (#8411) --- tfjs-layers/README.md | 2 +- tfjs-layers/demos/README.md | 2 +- .../src/layers/nlp/modeling/transformer_decoder_test.ts | 4 ++-- tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts | 2 +- tfjs-layers/src/layers/nlp/multihead_attention.ts | 2 +- tfjs-layers/src/layers/normalization.ts | 2 +- tfjs-layers/src/layers/normalization_test.ts | 4 ++-- .../src/layers/preprocessing/image_resizing_test.ts | 2 +- tfjs-layers/src/layers/preprocessing/random_height.ts | 4 ++-- tfjs-layers/src/layers/preprocessing/random_width.ts | 4 ++-- tfjs-layers/src/layers/recurrent.ts | 8 ++++---- 11 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tfjs-layers/README.md b/tfjs-layers/README.md index cd462791eb9..1fd735fce4c 100644 --- a/tfjs-layers/README.md +++ b/tfjs-layers/README.md @@ -52,7 +52,7 @@ const ys = tf.tensor2d([[1], [3], [5], [7]], [4, 1]); // Train the model. await model.fit(xs, ys, {epochs: 500}); -// Ater the training, perform inference. +// After the training, perform inference. const output = model.predict(tf.tensor2d([[5]], [1, 1])); output.print(); ``` diff --git a/tfjs-layers/demos/README.md b/tfjs-layers/demos/README.md index 3930909f743..71385b3818e 100644 --- a/tfjs-layers/demos/README.md +++ b/tfjs-layers/demos/README.md @@ -16,7 +16,7 @@ Once the development environment is prepared, execute the build script from the ``` The script will construct a number of Keras models in Python and benchmark their training using the TensorFlow backend. When it is complete, it will bring up a -local HTTP server. Navigate to the local URL spcecified in stdout to bring up +local HTTP server. Navigate to the local URL specified in stdout to bring up the benchmarks page UI. There will be a button to begin the JS side of the benchmarks. Clicking the button will run through and time the same models, now running in the browser. diff --git a/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts b/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts index a2a03045b54..b0b0aa15bff 100644 --- a/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts +++ b/tfjs-layers/src/layers/nlp/modeling/transformer_decoder_test.ts @@ -106,7 +106,7 @@ describe('TransformerDecoder', () => { const config = testLayer.getConfig(); const restored = TransformerDecoder.fromConfig(TransformerDecoder, config); - // Initializers don't get serailized with customObjects. + // Initializers don't get serialized with customObjects. delete ((config['kernelInitializer'] as serialization.ConfigDict )['config'] as serialization.ConfigDict)['customObjects']; delete ((config['biasInitializer'] as serialization.ConfigDict @@ -167,5 +167,5 @@ describe('TransformerDecoder', () => { expectTensorsClose(outputCache, noLoopCache); }); - // TODO(pforderique): Test mask propogation once supported. + // TODO(pforderique): Test mask propagation once supported. 
}); diff --git a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts index b859e16e27f..f96b37f5d7f 100644 --- a/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts +++ b/tfjs-layers/src/layers/nlp/models/gpt2/gpt2_causal_lm.ts @@ -70,7 +70,7 @@ export declare interface GPT2CausalLMArgs extends PipelineModelArgs { } /** - * An end-to-end GPT2 model for causal langauge modeling. + * An end-to-end GPT2 model for causal language modeling. * * A causal language model (LM) predicts the next token based on previous * tokens. This task setup can be used to train the model unsupervised on diff --git a/tfjs-layers/src/layers/nlp/multihead_attention.ts b/tfjs-layers/src/layers/nlp/multihead_attention.ts index 46253c117c3..8c9df5ea27c 100644 --- a/tfjs-layers/src/layers/nlp/multihead_attention.ts +++ b/tfjs-layers/src/layers/nlp/multihead_attention.ts @@ -703,7 +703,7 @@ export class MultiHeadAttention extends Layer { newInputs = [inputs, kwargs['value']].concat(kwargs['key'] ?? []); - // TODO(pforderique): Support mask propogation. + // TODO(pforderique): Support mask propagation. return super.apply(newInputs, kwargs); } diff --git a/tfjs-layers/src/layers/normalization.ts b/tfjs-layers/src/layers/normalization.ts index 03193c83af0..1e4f8c56b8c 100644 --- a/tfjs-layers/src/layers/normalization.ts +++ b/tfjs-layers/src/layers/normalization.ts @@ -430,7 +430,7 @@ export interface LayerNormalizationLayerArgs extends LayerArgs { axis?: number|number[]; /** - * A small positive float added to variance to avoid divison by zero. + * A small positive float added to variance to avoid division by zero. * Defaults to 1e-3. */ epsilon?: number; diff --git a/tfjs-layers/src/layers/normalization_test.ts b/tfjs-layers/src/layers/normalization_test.ts index b7e3db11c6c..ad02fcf8fb4 100644 --- a/tfjs-layers/src/layers/normalization_test.ts +++ b/tfjs-layers/src/layers/normalization_test.ts @@ -353,7 +353,7 @@ describeMathCPUAndWebGL2('BatchNormalization Layers: Tensor', () => { const x = tensor2d([[1, 2], [3, 4]], [2, 2]); expectTensorsClose(layer.apply(x) as Tensor, x, 0.01); expect(layer.getWeights().length).toEqual(3); - // Firt weight is gamma. + // First weight is gamma. expectTensorsClose(layer.getWeights()[0], onesLike(layer.getWeights()[0])); // Second weight is moving mean. expectTensorsClose(layer.getWeights()[1], zerosLike(layer.getWeights()[1])); @@ -366,7 +366,7 @@ describeMathCPUAndWebGL2('BatchNormalization Layers: Tensor', () => { const x = tensor2d([[1, 2], [3, 4]], [2, 2]); expectTensorsClose(layer.apply(x) as Tensor, x, 0.01); expect(layer.getWeights().length).toEqual(3); - // Firt weight is beta. + // First weight is beta. expectTensorsClose(layer.getWeights()[0], zerosLike(layer.getWeights()[0])); // Second weight is moving mean. 
expectTensorsClose(layer.getWeights()[1], zerosLike(layer.getWeights()[1])); diff --git a/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts b/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts index 9afc0b7f85d..85089e771f7 100644 --- a/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts +++ b/tfjs-layers/src/layers/preprocessing/image_resizing_test.ts @@ -88,7 +88,7 @@ describeMathCPUAndGPU('Resizing Layer', () => { }); it('Returns a tensor of the correct dtype', () => { - // do a same resizing operation, cheeck tensors dtypes and content + // do a same resizing operation, check tensors dtypes and content const height = 40; const width = 60; const numChannels = 3; diff --git a/tfjs-layers/src/layers/preprocessing/random_height.ts b/tfjs-layers/src/layers/preprocessing/random_height.ts index fa21f371e53..728d58c4bad 100644 --- a/tfjs-layers/src/layers/preprocessing/random_height.ts +++ b/tfjs-layers/src/layers/preprocessing/random_height.ts @@ -35,7 +35,7 @@ type InterpolationType = typeof INTERPOLATION_KEYS[number]; * * The input should be a 3D (unbatched) or * 4D (batched) tensor in the `"channels_last"` image data format. Input pixel - * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of interger + * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer * or floating point dtype. By default, the layer will output floats. * * tf methods implemented in tfjs: 'bilinear', 'nearest', @@ -48,7 +48,7 @@ export class RandomHeight extends BaseRandomLayer { /** @nocollapse */ static override className = 'RandomHeight'; private readonly factor: number | [number, number]; - private readonly interpolation?: InterpolationType; // defualt = 'bilinear + private readonly interpolation?: InterpolationType; // default = 'bilinear private heightLower: number; private heightUpper: number; private imgWidth: number; diff --git a/tfjs-layers/src/layers/preprocessing/random_width.ts b/tfjs-layers/src/layers/preprocessing/random_width.ts index 60b71490e6e..c969bd9c505 100644 --- a/tfjs-layers/src/layers/preprocessing/random_width.ts +++ b/tfjs-layers/src/layers/preprocessing/random_width.ts @@ -35,7 +35,7 @@ type InterpolationType = typeof INTERPOLATION_KEYS[number]; * * The input should be a 3D (unbatched) or * 4D (batched) tensor in the `"channels_last"` image data format. Input pixel - * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of interger + * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer * or floating point dtype. By default, the layer will output floats. * * tf methods implemented in tfjs: 'bilinear', 'nearest', @@ -48,7 +48,7 @@ export class RandomWidth extends BaseRandomLayer { /** @nocollapse */ static override className = 'RandomWidth'; private readonly factor: number | [number, number]; - private readonly interpolation?: InterpolationType; // defualt = 'bilinear + private readonly interpolation?: InterpolationType; // default = 'bilinear private widthLower: number; private widthUpper: number; private imgHeight: number; diff --git a/tfjs-layers/src/layers/recurrent.ts b/tfjs-layers/src/layers/recurrent.ts index b2af94772ed..95153ca239c 100644 --- a/tfjs-layers/src/layers/recurrent.ts +++ b/tfjs-layers/src/layers/recurrent.ts @@ -252,7 +252,7 @@ export declare interface BaseRNNLayerArgs extends LayerArgs { * see section "Note on passing external constants" below. * Porting Node: PyKeras overrides the `call()` signature of RNN cells, * which are Layer subtypes, to accept two arguments. 
tfjs-layers does - * not do such overriding. Instead we preseve the `call()` signature, + * not do such overriding. Instead we preserve the `call()` signature, * which due to its `Tensor|Tensor[]` argument and return value is * flexible enough to handle the inputs and states. * - a `stateSize` attribute. This can be a single integer (single state) @@ -757,7 +757,7 @@ export class RNN extends Layer { const output = this.returnSequences ? outputs : lastOutput; - // TODO(cais): Porperty set learning phase flag. + // TODO(cais): Property set learning phase flag. if (this.returnState) { return [output].concat(states); @@ -1933,7 +1933,7 @@ export class StackedRNNCells extends RNNCell { get stateSize(): number[] { // States are a flat list in reverse order of the cell stack. - // This allows perserving the requirement `stack.statesize[0] === + // This allows preserving the requirement `stack.statesize[0] === // outputDim`. E.g., states of a 2-layer LSTM would be `[h2, c2, h1, c1]`, // assuming one LSTM has states `[h, c]`. const stateSize: number[] = []; @@ -2098,7 +2098,7 @@ export class StackedRNNCells extends RNNCell { batchSetValue(tuples); } - // TODO(cais): Maybe implemnt `losses` and `getLossesFor`. + // TODO(cais): Maybe implement `losses` and `getLossesFor`. } serialization.registerClass(StackedRNNCells);
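For context, the `Ater` → `After` fix in the first hunk lands inside the README's getting-started example. A minimal self-contained sketch of that example follows; only the `ys`, `fit`, and `predict`/`print` lines are visible in the hunk context, so the model definition and the `xs` data here are assumptions for illustration, not part of this patch.

```js
import * as tf from '@tensorflow/tfjs';

// Assumed model: a single dense unit, trained to fit roughly y = 2x - 1.
const model = tf.sequential();
model.add(tf.layers.dense({units: 1, inputShape: [1]}));
model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});

// Assumed training inputs; the `ys` line matches the hunk context.
const xs = tf.tensor2d([[1], [2], [3], [4]], [4, 1]);
const ys = tf.tensor2d([[1], [3], [5], [7]], [4, 1]);

// Train the model.
await model.fit(xs, ys, {epochs: 500});

// After the training, perform inference.
const output = model.predict(tf.tensor2d([[5]], [1, 1]));
output.print();
```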