Fix typos (PaddlePaddle#1455)
co63oc authored and jiahy0825 committed May 25, 2023
1 parent 9ba250d commit ccdea5a
Showing 12 changed files with 24 additions and 24 deletions.
12 changes: 6 additions & 6 deletions cinn/common/cas.cc
@@ -129,15 +129,15 @@ Expr ProductGetNonConstantPart(Expr u) {

namespace detail {

-// Is a Divisiable to b.
+// Is a Divisible to b.
// @{
bool IsDivisible(int64_t a, int64_t b) {
CHECK_NE(b, 0);
return a % b == 0;
}
bool IsDivisible(const Sum* a, int b);

-// If int a Divisiable to any operands of product b
+// If int a Divisible to any operands of product b
bool IsDivisible(int a, const Product* b) {
if (a < 0) return false;
for (auto& item : b->operands()) {
@@ -694,7 +694,7 @@ std::vector<Expr> CasSimplifyMutator::SimplifyBinarySum(Expr left, Expr right) {
if (bi && bi->value == 0) return {a};
if (bf && bf->value == 0.f) return {a};

-// customied case for Mod
+// customized case for Mod
{
auto* am = a.As<Mod>();
auto* bm = b.As<Mod>();
@@ -1879,11 +1879,11 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
}

// case 2
-// sum/x or product/x is divisiable
+// sum/x or product/x is divisible
if (bi) {
auto* a_sum = a.As<Sum>();
auto* a_product = a.As<Product>();
-// disiviable
+// divisible
if (a_sum && IsDivisible(a_sum, bi->value)) return Divide(a_sum, bi->value);
if (a_product) {
if (IsDivisible(a_product, bi->value) || IsDivisible(bi->value, a_product)) {
@@ -1915,7 +1915,7 @@ Expr CasSimplifyMutator::SimplifyFracOp(Expr expr) {
}
}

-// not divisiable
+// not divisible
/*
if (a_sum) {
auto expr = DividePartially(a_sum, bi->value);
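For context on the cas.cc hunks above: the divisibility helpers decide whether a sum or product can be divided exactly by an integer constant, which is what lets SimplifyFracOp turn an expression such as (4*i + 8)/4 into i + 2. A minimal standalone sketch of that idea follows; the integer overload mirrors IsDivisible above, while the coefficient-based helper and the main function are illustrative assumptions rather than CINN code.

#include <cassert>
#include <cstdint>
#include <vector>

// Plain integer divisibility, mirroring the IsDivisible(int64_t, int64_t) overload above.
bool IsDivisible(int64_t a, int64_t b) {
  assert(b != 0);
  return a % b == 0;
}

// A sum such as 4*i + 8*j + 12 is divisible by 4 only if every coefficient is,
// so (4*i + 8*j + 12) / 4 simplifies to i + 2*j + 3.
bool SumIsDivisible(const std::vector<int64_t>& coefficients, int64_t b) {
  for (int64_t c : coefficients) {
    if (!IsDivisible(c, b)) return false;
  }
  return true;
}

int main() {
  const std::vector<int64_t> coeffs = {4, 8, 12};  // coefficients of 4*i + 8*j + 12
  assert(SumIsDivisible(coeffs, 4));   // exact division, safe to simplify
  assert(!SumIsDivisible(coeffs, 3));  // 4 and 8 are not multiples of 3
  return 0;
}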
2 changes: 1 addition & 1 deletion cinn/common/cas.h
@@ -37,7 +37,7 @@ Expr ReplaceMaxToConstant(Expr expr);
struct CasInterval {
template <typename T>
CasInterval(T l, T r) : l(l), r(r) {
-CHECK_LE(l, r) << "left shoud not be larger than right";
+CHECK_LE(l, r) << "left should not be larger than right";
}

/**
2 changes: 1 addition & 1 deletion cinn/frontend/decomposer/batch_norm_test.cc
@@ -128,7 +128,7 @@ void ComputeBatchNormTrainRef(const std::vector<T>& x,
};
Loop(func_y, n, c, h, w);

-// new moving runnning and variance
+// new moving running and variance
float factor_0 = momentum;
float factor_1 = static_cast<float>(1.0f - momentum);
for (int ic = 0; ic < c; ++ic) {
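The "moving running and variance" comment fixed above refers to the exponential moving average that batch norm training applies to its running statistics: new_running = momentum * old_running + (1 - momentum) * batch_statistic. A hedged sketch of that per-channel update, with illustrative names rather than the test's actual helpers:

#include <cstddef>
#include <vector>

// Moving-average update of per-channel statistics, as in the reference computation above:
// factor_0 weights the old running value, factor_1 weights the current batch value.
void UpdateMovingStats(std::vector<float>* running_mean,
                       std::vector<float>* running_var,
                       const std::vector<float>& batch_mean,
                       const std::vector<float>& batch_var,
                       float momentum) {
  const float factor_0 = momentum;
  const float factor_1 = 1.0f - momentum;
  for (std::size_t c = 0; c < running_mean->size(); ++c) {
    (*running_mean)[c] = factor_0 * (*running_mean)[c] + factor_1 * batch_mean[c];
    (*running_var)[c]  = factor_0 * (*running_var)[c]  + factor_1 * batch_var[c];
  }
}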
2 changes: 1 addition & 1 deletion cinn/frontend/op_mappers/paddle/batchnorm.cc
@@ -145,7 +145,7 @@ void BatchNormGradOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperCon

// batch norm grad, output(grad_x, grad_scale, grad_bias)
auto outs = ctx.Builder()->BatchNormGrad(dy, x, scale, saved_mean, saved_variance, epsilon, data_layout);
-CHECK_EQ(outs.size(), 3ul) << "batch_norm_grad API's should return 3 Variable!";
+CHECK_EQ(outs.size(), 3ul) << "batch_norm_grad APIs should return 3 Variable!";

for (int i = 0; i < outs.size(); i++) {
if (output_names[i].empty()) {
6 changes: 3 additions & 3 deletions cinn/frontend/pass/cast_collapsing.cc
@@ -171,17 +171,17 @@ class CastCollapsingPass : public ProgramPass {

if (CheckCastBorder(cast, in2instr)) {
if (can_remove) {
-VLOG(4) << "The op " << cast_info << " is a output op of graph, connot fuse, remove.";
+VLOG(4) << "The op " << cast_info << " is a output op of graph, cannot fuse, remove.";
// this cast not used by any other op, remove
remove_instrs->insert(cast);
} else {
if (input_dtype == output_dtype) {
VLOG(4) << "The cast op " << cast_info << " is fetched but useless, replace with identity.";
-// cannot remove, however, the transpsoe is useless, we can replace the cast with indentiy for more
+// cannot remove, however, the transpose is useless, we can replace the cast with identity for more
// fusion opportunity
ReplaceWithIdentity(cast);
}
-// else the transpsoe is fetched and helpful, ignore
+// else the transpose is fetched and helpful, ignore
}
return;
}
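To summarize the logic this hunk touches: a cast at the border of the graph whose output nobody uses can simply be removed, while a fetched but dtype-preserving cast is replaced with an identity op so later fusion passes can still work around it. A simplified sketch of that decision, using a hypothetical enum and function rather than the pass's real API:

#include <string>

enum class CastAction { kRemove, kReplaceWithIdentity, kKeep };

// Mirror of the branches above: removable -> drop the cast; fetched but
// dtype-preserving -> replace with identity; otherwise it is a real cast.
CastAction DecideBorderCast(const std::string& input_dtype,
                            const std::string& output_dtype,
                            bool can_remove) {
  if (can_remove) return CastAction::kRemove;
  if (input_dtype == output_dtype) return CastAction::kReplaceWithIdentity;
  return CastAction::kKeep;
}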
6 changes: 3 additions & 3 deletions cinn/hlir/op/elementwise.cc
@@ -295,7 +295,7 @@ std::shared_ptr<OpStrategy> StrategyForFillConstant(const framework::NodeAttr &a

if (force_cpu && target != common::DefaultHostTarget()) {
LOG(WARNING) << "The attribute \"force_cpu\" of \"fill_constant\" not supported in CINN! The \"fill_constant\"'s "
-"output tensot will placed on "
+"output tensor will placed on "
<< target;
}

@@ -339,10 +339,10 @@ std::vector<Type> InferDtypeForFillConstant(const std::vector<Type> &inputs_type
out_type = common::Str2Type(dtype_str);
VLOG(3) << "FillConstant output dtype (from [dtype]): " << dtype_str;
} else {
-// attribute [dtype] no given, infered by value's type
+// attribute [dtype] no given, inferred by value's type
auto scalar = GetScalarExpr(attrs.at("value"));
out_type = scalar->type();
-VLOG(3) << "FillConstant scalar type (from [vaule]): " << common::Type2Str(out_type);
+VLOG(3) << "FillConstant scalar type (from [value]): " << common::Type2Str(out_type);
}
return {out_type};
}
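The second hunk in this file covers the dtype fallback for fill_constant: an explicit [dtype] attribute wins, otherwise the output type is inferred from the value itself. A rough sketch of that fallback under those assumptions; the helper below is hypothetical and not the CINN API:

#include <string>
#include <variant>

// Prefer the explicit [dtype] attribute; otherwise infer the type from the value.
std::string InferFillConstantDtype(const std::string& dtype_attr,
                                   const std::variant<int, float, double>& value) {
  if (!dtype_attr.empty()) return dtype_attr;              // from [dtype]
  if (std::holds_alternative<int>(value)) return "int32";  // inferred from value
  if (std::holds_alternative<float>(value)) return "float32";
  return "float64";
}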
2 changes: 1 addition & 1 deletion cinn/optim/buffer_assign.cc
@@ -95,7 +95,7 @@ std::map<std::string, ir::Tensor> InitialAssignBuffer(Expr* expr,
}
}

-// determine which tensor to have the initial buffer, and will share accross the cluser, we take a topological order
+// determine which tensor to have the initial buffer, and will share across the cluster, we take a topological order
// of the computational graph, and find out which tensor comes first in a cluster.

auto _topo_order_topo_edges_ = comp_graph->topological_order();
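The comment corrected here describes buffer sharing: within each cluster of tensors that can reuse one buffer, the tensor that appears first in a topological order of the computation graph owns the initial buffer. An illustrative selection routine under that reading; the data layout and names are made up for the example:

#include <map>
#include <string>
#include <utility>
#include <vector>

// Given (tensor, cluster) pairs already sorted in topological order, keep the
// first tensor seen per cluster; later members of the cluster share its buffer.
std::map<int, std::string> PickBufferOwners(
    const std::vector<std::pair<std::string, int>>& topo_ordered) {
  std::map<int, std::string> owner;
  for (const auto& entry : topo_ordered) {
    owner.emplace(entry.second, entry.first);  // emplace ignores keys already present
  }
  return owner;
}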
2 changes: 1 addition & 1 deletion cinn/runtime/cinn_runtime.h
@@ -62,7 +62,7 @@ typedef enum cinn_type_code_t {
#endif

/**
-* A tuntime tag for type in CINN system.
+* A runtime tag for type in CINN system.
*/
typedef struct cinn_type_t {
#if __cplusplus >= 201103L
2 changes: 1 addition & 1 deletion cinn/utils/event.cc
@@ -66,7 +66,7 @@ std::string Summary::Format(const std::vector<HostEvent> &events) {
unique_items[e.annotation_] = &items.back();
unique_items.at(e.annotation_)->info.duration_ = 0.0;
}
-// Sum cost for categorey
+// Sum cost for category
category_cost[e.type_] += e.duration_;
total_cost += e.duration_;
max_annot_size = std::max(max_annot_size, e.annotation_.size());
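The loop shown above aggregates profiling events: each event's duration is added both to its category's total and to the overall total. A simplified sketch of that aggregation; the HostEvent fields here are assumptions, not the real struct:

#include <map>
#include <string>
#include <vector>

struct HostEvent {
  std::string annotation;
  std::string type;  // category
  double duration;
};

// Sum durations per category and overall, in the spirit of Summary::Format above.
std::map<std::string, double> SumCostByCategory(const std::vector<HostEvent>& events,
                                                double* total_cost) {
  std::map<std::string, double> category_cost;
  *total_cost = 0.0;
  for (const auto& e : events) {
    category_cost[e.type] += e.duration;
    *total_cost += e.duration;
  }
  return category_cost;
}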
6 changes: 3 additions & 3 deletions cmake/core.cmake
@@ -254,7 +254,7 @@ function(merge_static_libs TARGET_NAME)
COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
DEPENDS ${libs} ${target_OBJS})

-# Generate dummy staic lib
+# Generate dummy static lib
file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
add_library(${TARGET_NAME} STATIC ${target_SRCS})
target_link_libraries(${TARGET_NAME} ${libs_deps})
@@ -275,7 +275,7 @@ function(merge_static_libs TARGET_NAME)
COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
DEPENDS ${libs})

-# Generate dummy staic lib
+# Generate dummy static lib
file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
add_library(${TARGET_NAME} STATIC ${target_SRCS})
target_link_libraries(${TARGET_NAME} ${libs_deps})
@@ -284,7 +284,7 @@ function(merge_static_libs TARGET_NAME)
# Get the file names of the libraries to be merged
set(libfiles ${libfiles} $<TARGET_FILE:${lib}>)
endforeach()
-# msvc will put libarary in directory of "/Release/xxxlib" by default
+# msvc will put library in directory of "/Release/xxxlib" by default
# COMMAND cmake -E remove "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/${TARGET_NAME}.lib"
add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
COMMAND cmake -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}"
4 changes: 2 additions & 2 deletions docs/design.org
@@ -315,7 +315,7 @@ It by default can be resized to proper shape by binding to multiple tensors.

All the buffers will be maintained in global scope, and alloc or dealloc in local scopes.

-The benefit is buffer is easy to shared accross multiple statements.
+The benefit is buffer is easy to shared across multiple statements.
** Module
Module is the container of LoweredFuncs and Buffers.
There might be more than one module in an generated execution.
@@ -334,7 +334,7 @@ then replace call with the statement.

There are several steps to vectorize a forloop:

-- the foorloop will first been split with a small factor(better to be times of the SIMD width
+- the forloop will first been split with a small factor(better to be times of the SIMD width
- convert the PolyFor to For
- Substitute the iterator variable of the forloop to a Ramp node
- Transform all the operations related to the Ramp
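The vectorization steps listed in this hunk (split the loop by a multiple of the SIMD width, lower PolyFor to For, substitute the iterator with a Ramp, then rewrite the affected operations) amount to the classic strip-mine-and-widen transformation. A plain C++ sketch of the resulting loop shape, assuming a SIMD width of 4; this is an illustration, not CINN IR:

constexpr int kSimdWidth = 4;

// After strip-mining by kSimdWidth, the inner loop walks lanes i + 0 .. i + 3
// (the "Ramp") and is a candidate for a single vector instruction; leftover
// iterations run as a scalar tail.
void AddVectorized(const float* a, const float* b, float* out, int n) {
  int i = 0;
  for (; i + kSimdWidth <= n; i += kSimdWidth) {
    for (int lane = 0; lane < kSimdWidth; ++lane) {
      out[i + lane] = a[i + lane] + b[i + lane];
    }
  }
  for (; i < n; ++i) out[i] = a[i] + b[i];  // scalar tail
}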
2 changes: 1 addition & 1 deletion infrt/common/axis.h
@@ -7,7 +7,7 @@
namespace infrt {
namespace common {

-//! Get the predifined axis name.
+//! Get the predefined axis name.
const std::string& axis_name(int level);
bool IsAxisNameReserved(const std::string& x);

