From ccdea5a7d57ff4cc3cb4a41f8542049abbab2fb5 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Tue, 23 May 2023 19:28:50 +0800
Subject: [PATCH] Fix typos (#1455)

---
 cinn/common/cas.cc                           | 12 ++++++------
 cinn/common/cas.h                            |  2 +-
 cinn/frontend/decomposer/batch_norm_test.cc  |  2 +-
 cinn/frontend/op_mappers/paddle/batchnorm.cc |  2 +-
 cinn/frontend/pass/cast_collapsing.cc        |  6 +++---
 cinn/hlir/op/elementwise.cc                  |  6 +++---
 cinn/optim/buffer_assign.cc                  |  2 +-
 cinn/runtime/cinn_runtime.h                  |  2 +-
 cinn/utils/event.cc                          |  2 +-
 cmake/core.cmake                             |  6 +++---
 docs/design.org                              |  4 ++--
 infrt/common/axis.h                          |  2 +-
 12 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/cinn/common/cas.cc b/cinn/common/cas.cc
index 6d66a94a1f..44607a5574 100644
--- a/cinn/common/cas.cc
+++ b/cinn/common/cas.cc
@@ -129,7 +129,7 @@ Expr ProductGetNonConstantPart(Expr u) {
 
 namespace detail {
 
-// Is a Divisiable to b.
+// Is a Divisible to b.
 // @{
 bool IsDivisible(int64_t a, int64_t b) {
   CHECK_NE(b, 0);
@@ -137,7 +137,7 @@ bool IsDivisible(int64_t a, int64_t b) {
 }
 bool IsDivisible(const Sum* a, int b);
 
-// If int a Divisiable to any operands of product b
+// If int a Divisible to any operands of product b
 bool IsDivisible(int a, const Product* b) {
   if (a < 0) return false;
   for (auto& item : b->operands()) {
@@ -694,7 +694,7 @@ std::vector<Expr> CasSimplifyMutator::SimplifyBinarySum(Expr left, Expr right) {
     if (bi && bi->value == 0) return {a};
     if (bf && bf->value == 0.f) return {a};
 
-    // customied case for Mod
+    // customized case for Mod
     {
       auto* am = a.As<Mod>();
       auto* bm = b.As<Mod>();
@@ -1879,11 +1879,11 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
   }
 
   // case 2
-  // sum/x or product/x is divisiable
+  // sum/x or product/x is divisible
   if (bi) {
     auto* a_sum     = a.As<Sum>();
     auto* a_product = a.As<Product>();
-    // disiviable
+    // divisible
     if (a_sum && IsDivisible(a_sum, bi->value)) return Divide(a_sum, bi->value);
     if (a_product) {
       if (IsDivisible(a_product, bi->value) || IsDivisible(bi->value, a_product)) {
@@ -1915,7 +1915,7 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
     }
   }
 
-  // not divisiable
+  // not divisible
   /*
   if (a_sum) {
     auto expr = DividePartially(a_sum, bi->value);
diff --git a/cinn/common/cas.h b/cinn/common/cas.h
index 4b25bb990f..03fa5181f5 100755
--- a/cinn/common/cas.h
+++ b/cinn/common/cas.h
@@ -37,7 +37,7 @@ Expr ReplaceMaxToConstant(Expr expr);
 struct CasInterval {
   template <typename T>
   CasInterval(T l, T r) : l(l), r(r) {
-    CHECK_LE(l, r) << "left shoud not be larger than right";
+    CHECK_LE(l, r) << "left should not be larger than right";
   }
 
   /**
diff --git a/cinn/frontend/decomposer/batch_norm_test.cc b/cinn/frontend/decomposer/batch_norm_test.cc
index a31736bcbf..c2f6b8e1dc 100755
--- a/cinn/frontend/decomposer/batch_norm_test.cc
+++ b/cinn/frontend/decomposer/batch_norm_test.cc
@@ -128,7 +128,7 @@ void ComputeBatchNormTrainRef(const std::vector<T>& x,
   };
   Loop(func_y, n, c, h, w);
 
-  // new moving runnning and variance
+  // new moving running and variance
   float factor_0 = momentum;
   float factor_1 = static_cast<float>(1.0f - momentum);
   for (int ic = 0; ic < c; ++ic) {
diff --git a/cinn/frontend/op_mappers/paddle/batchnorm.cc b/cinn/frontend/op_mappers/paddle/batchnorm.cc
index 33ebf6b02c..133ce244f6 100644
--- a/cinn/frontend/op_mappers/paddle/batchnorm.cc
+++ b/cinn/frontend/op_mappers/paddle/batchnorm.cc
@@ -145,7 +145,7 @@ void BatchNormGradOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperCon
 
   // batch norm grad, output(grad_x, grad_scale, grad_bias)
   auto outs = ctx.Builder()->BatchNormGrad(dy, x, scale, saved_mean, saved_variance, epsilon, data_layout);
-  CHECK_EQ(outs.size(), 3ul) << "batch_norm_grad API's should return 3 Variable!";
+  CHECK_EQ(outs.size(), 3ul) << "batch_norm_grad APIs should return 3 Variable!";
 
   for (int i = 0; i < outs.size(); i++) {
     if (output_names[i].empty()) {
diff --git a/cinn/frontend/pass/cast_collapsing.cc b/cinn/frontend/pass/cast_collapsing.cc
index dd33369803..50e7001e09 100644
--- a/cinn/frontend/pass/cast_collapsing.cc
+++ b/cinn/frontend/pass/cast_collapsing.cc
@@ -171,17 +171,17 @@ class CastCollapsingPass : public ProgramPass {
 
     if (CheckCastBorder(cast, in2instr)) {
       if (can_remove) {
-        VLOG(4) << "The op " << cast_info << " is a output op of graph, connot fuse, remove.";
+        VLOG(4) << "The op " << cast_info << " is a output op of graph, cannot fuse, remove.";
         // this cast not used by any other op, remove
         remove_instrs->insert(cast);
       } else {
         if (input_dtype == output_dtype) {
           VLOG(4) << "The cast op " << cast_info << " is fetched but useless, replace with identity.";
-          // cannot remove, however, the transpsoe is useless, we can replace the cast with indentiy for more
+          // cannot remove, however, the transpose is useless, we can replace the cast with identity for more
           // fusion opportunity
           ReplaceWithIdentity(cast);
         }
-        // else the transpsoe is fetched and helpful, ignore
+        // else the transpose is fetched and helpful, ignore
       }
       return;
     }
diff --git a/cinn/hlir/op/elementwise.cc b/cinn/hlir/op/elementwise.cc
index 4a7ebc212d..a77c07a98a 100644
--- a/cinn/hlir/op/elementwise.cc
+++ b/cinn/hlir/op/elementwise.cc
@@ -295,7 +295,7 @@ std::shared_ptr<OpStrategy> StrategyForFillConstant(const framework::NodeAttr &a
 
   if (force_cpu && target != common::DefaultHostTarget()) {
     LOG(WARNING) << "The attribute \"force_cpu\" of \"fill_constant\" not supported in CINN! The \"fill_constant\"'s "
-                    "output tensot will placed on "
+                    "output tensor will placed on "
                  << target;
   }
 
@@ -339,10 +339,10 @@ std::vector<Type> InferDtypeForFillConstant(const std::vector<Type> &inputs_type
     out_type = common::Str2Type(dtype_str);
     VLOG(3) << "FillConstant output dtype (from [dtype]): " << dtype_str;
   } else {
-    // attribute [dtype] no given, infered by value's type
+    // attribute [dtype] no given, inferred by value's type
     auto scalar = GetScalarExpr(attrs.at("value"));
     out_type    = scalar->type();
-    VLOG(3) << "FillConstant scalar type (from [vaule]): " << common::Type2Str(out_type);
+    VLOG(3) << "FillConstant scalar type (from [value]): " << common::Type2Str(out_type);
   }
   return {out_type};
 }
diff --git a/cinn/optim/buffer_assign.cc b/cinn/optim/buffer_assign.cc
index 009e32dd21..0b59feb339 100644
--- a/cinn/optim/buffer_assign.cc
+++ b/cinn/optim/buffer_assign.cc
@@ -95,7 +95,7 @@ std::map<std::string, ir::Tensor> InitialAssignBuffer(Expr* expr,
     }
   }
 
-  // determine which tensor to have the initial buffer, and will share accross the cluser, we take a topological order
+  // determine which tensor to have the initial buffer, and will share across the cluster, we take a topological order
   // of the computational graph, and find out which tensor comes first in a cluster.
 
   auto _topo_order_topo_edges_ = comp_graph->topological_order();
diff --git a/cinn/runtime/cinn_runtime.h b/cinn/runtime/cinn_runtime.h
index 0441d68d38..e8243e26be 100755
--- a/cinn/runtime/cinn_runtime.h
+++ b/cinn/runtime/cinn_runtime.h
@@ -62,7 +62,7 @@ typedef enum cinn_type_code_t {
 #endif
 
 /**
- * A tuntime tag for type in CINN system.
+ * A runtime tag for type in CINN system.
  */
 typedef struct cinn_type_t {
 #if __cplusplus >= 201103L
diff --git a/cinn/utils/event.cc b/cinn/utils/event.cc
index 031ec7f110..3e0ceaf081 100644
--- a/cinn/utils/event.cc
+++ b/cinn/utils/event.cc
@@ -66,7 +66,7 @@ std::string Summary::Format(const std::vector<HostEvent> &events) {
       unique_items[e.annotation_] = &items.back();
       unique_items.at(e.annotation_)->info.duration_ = 0.0;
     }
-    // Sum cost for categorey
+    // Sum cost for category
     category_cost[e.type_] += e.duration_;
     total_cost += e.duration_;
     max_annot_size = std::max(max_annot_size, e.annotation_.size());
diff --git a/cmake/core.cmake b/cmake/core.cmake
index d9c8aed777..72afc1a596 100644
--- a/cmake/core.cmake
+++ b/cmake/core.cmake
@@ -254,7 +254,7 @@ function(merge_static_libs TARGET_NAME)
     COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
     DEPENDS ${libs} ${target_OBJS})
 
-  # Generate dummy staic lib
+  # Generate dummy static lib
   file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
   add_library(${TARGET_NAME} STATIC ${target_SRCS})
   target_link_libraries(${TARGET_NAME} ${libs_deps})
@@ -275,7 +275,7 @@ function(merge_static_libs TARGET_NAME)
     COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
     DEPENDS ${libs})
 
-  # Generate dummy staic lib
+  # Generate dummy static lib
   file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
   add_library(${TARGET_NAME} STATIC ${target_SRCS})
   target_link_libraries(${TARGET_NAME} ${libs_deps})
@@ -284,7 +284,7 @@ function(merge_static_libs TARGET_NAME)
       # Get the file names of the libraries to be merged
       set(libfiles ${libfiles} $<TARGET_FILE:${lib}>)
     endforeach()
-    # msvc will put libarary in directory of "/Release/xxxlib" by default
+    # msvc will put library in directory of "/Release/xxxlib" by default
    # COMMAND cmake -E remove "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/${TARGET_NAME}.lib"
     add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
       COMMAND cmake -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}"
diff --git a/docs/design.org b/docs/design.org
index 6b07939739..65e1ab735c 100644
--- a/docs/design.org
+++ b/docs/design.org
@@ -315,7 +315,7 @@ It by default can be resized to proper shape by binding to multiple tensors.
 
 All the buffers will be maintained in global scope, and alloc or dealloc in local scopes.
 
-The benefit is buffer is easy to shared accross multiple statements.
+The benefit is buffer is easy to shared across multiple statements.
 ** Module
 Module is the container of LoweredFuncs and Buffers.
 There might be more than one module in an generated execution.
@@ -334,7 +334,7 @@ then replace call with the statement.
 
 There are several steps to vectorize a forloop:
 
-- the foorloop will first been split with a small factor(better to be times of the SIMD width
+- the forloop will first been split with a small factor(better to be times of the SIMD width
 - convert the PolyFor to For
 - Substitute the iterator variable of the forloop to a Ramp node
 - Transform all the operations related to the Ramp
diff --git a/infrt/common/axis.h b/infrt/common/axis.h
index 9014719bd7..5b9708ac97 100644
--- a/infrt/common/axis.h
+++ b/infrt/common/axis.h
@@ -7,7 +7,7 @@ namespace infrt {
 
 namespace common {
 
-//! Get the predifined axis name.
+//! Get the predefined axis name.
 const std::string& axis_name(int level);
 
 bool IsAxisNameReserved(const std::string& x);