This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

op unittest for cbrt/ceil/cholesky/concat/constant/fill_constant #1495

Merged · 11 commits · Jun 8, 2023
Changes from 6 commits
2 changes: 1 addition & 1 deletion cinn/frontend/net_builder.h
@@ -350,7 +350,7 @@ class NetBuilder {
const std::string& id_hint = "");

/**
* @brief Create constant tensor with the specific value/vector and type, the type is infered from value.
* @brief Create constant tensor with the specific value/vector and type
* @param value The constant value to be set.
* @param name The name of output variable.
* @return The result variable.
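The removed clause reflects the behavioral change in this PR: the constant's element type is now carried as an explicit "dtype" attribute on the op rather than being re-inferred from the stored value (see the InferDtypeForConstScalar change below). A hedged usage sketch for context; the Python method name follows the repo's SnakeName binding convention, and the exact signature is an assumption, not confirmed by this diff:

```python
# Hypothetical usage sketch; the exact Python binding signature is assumed.
from cinn.frontend import NetBuilder

builder = NetBuilder("const_demo")
# The op now carries an explicit "dtype" attribute; how the frontend
# populates it is not shown in this diff.
c = builder.constant(1.0, "c")
```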
27 changes: 19 additions & 8 deletions cinn/hlir/op/elementwise.cc
@@ -201,6 +201,7 @@ std::shared_ptr<OpStrategy> StrategyForConstScalar(const framework::NodeAttr &at
framework::CINNCompute const_scalar_compute([=](lang::Args args, lang::RetValue *ret) {
CHECK(!args.empty()) << "The input argument of const_float compute is empty! Please check.";
auto scalar = GetScalarExpr(attrs.attr_store.at("value"));
auto scalar_type = out_type.at(0);
CINNValuePack pack_args = args[0];
std::string tensor_name = UniqName("const_scalar_Out");
if (FLAGS_cinn_ir_schedule) {
@@ -210,7 +211,12 @@ std::shared_ptr<OpStrategy> StrategyForConstScalar(const framework::NodeAttr &at
}

auto out = lang::Compute(
{Expr(1)}, [=](const std::vector<Expr> &indice) { return scalar; }, tensor_name);
{Expr(1)},
[=](const std::vector<Expr> &indice) {
auto res = (scalar_type == scalar->type()) ? scalar : ir::Cast::Make(scalar_type, scalar);
return res;
},
tensor_name);
CHECK(out.defined()) << "can't create const scalar with the given type " << out_type[0];
auto stages = CreateStages({out});
*ret = CINNValuePack{{CINNValue(out), CINNValue(stages)}};
@@ -229,11 +235,16 @@ std::vector<shape_t> InferShapeForConstScalar(const std::vector<shape_t> &inputs
}

std::vector<Type> InferDtypeForConstScalar(const std::vector<Type> &inputs_type, const framework::AttrMapType &attrs) {
CHECK(attrs.count("value"));
auto scalar = GetScalarExpr(attrs.at("value"));
auto out_type = scalar->type();
VLOG(3) << "scalar type: " << out_type;
return {out_type};
Type scalar_type;
if (attrs.find("dtype") != attrs.end()) {
auto dtype_str = absl::get<std::string>(attrs.at("dtype"));
if (!dtype_str.empty()) {
scalar_type = common::Str2Type(dtype_str);
}
}
CHECK(!scalar_type.is_unk());
VLOG(3) << "scalar type: " << scalar_type;
return {scalar_type};
}

std::vector<std::vector<std::string>> InferLayoutForConstScalar(const std::vector<framework::shape_t> &input_shapes,
@@ -356,10 +367,10 @@ std::vector<std::vector<std::string>> InferLayoutForFillConstant(const std::vect

#define EXPAND_ATTR_TYPE(MACRO) \
MACRO(bool) \
MACRO(float) \
MACRO(int) \
MACRO(int64_t) \
MACRO(double)
MACRO(double) \
MACRO(float)

std::shared_ptr<OpStrategy> StrategyForAssignValue(const framework::NodeAttr &attrs,
const std::vector<ir::Tensor> &inputs,
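Summarizing the three edits above: the const_scalar compute now wraps the literal in an ir::Cast whenever its type differs from the requested output type; InferDtypeForConstScalar requires a non-empty "dtype" attribute and resolves it via common::Str2Type instead of inferring the type from the value; and float moves to the end of EXPAND_ATTR_TYPE. A rough Python analogue of the new inference rule, with a hypothetical function name standing in for the C++ one:

```python
# Rough analogue of InferDtypeForConstScalar after this PR (illustrative only).
def infer_dtype_for_const_scalar(attrs: dict) -> str:
    dtype_str = attrs.get("dtype", "")
    # Fail fast instead of silently falling back to the value's own type.
    assert dtype_str, "const_scalar requires an explicit, non-empty dtype"
    return dtype_str  # the C++ code converts this string via common::Str2Type
```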
2 changes: 0 additions & 2 deletions cinn/pybind/frontend.cc
@@ -65,8 +65,6 @@ static const char *SnakeName(const char *name) {

#define EXPAND_CINN_SUPPORT_TYPE(EXPAND_MACRO) \
EXPAND_MACRO(bool) \
EXPAND_MACRO(float) \
EXPAND_MACRO(int) \
EXPAND_MACRO(int64_t) \
EXPAND_MACRO(double)

130 changes: 93 additions & 37 deletions python/tests/ops/test_cbrt_op.py
@@ -14,26 +14,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import paddle.nn.functional as F
import cinn
from cinn.frontend import *
import numpy as np
from cinn.common import *
from cinn.frontend import *
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper


@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
class TestCbrtOp(OpTest):
def setUp(self):
self.init_case()
print(f"\nRunning {self.__class__.__name__}: {self.case}")
self.inputs = {}
self.prepare_inputs()

def init_case(self):
def prepare_inputs(self):
self.inputs = {
"x": np.array([0, 1, 0.01, 27, 1000000,
0.970299]).astype("float32")
"x": self.random(self.case["shape"], self.case["dtype"], -1.0,
1.0),
}

def build_paddle_program(self, target):
Expand All @@ -43,44 +43,100 @@ def build_paddle_program(self, target):

def build_cinn_program(self, target):
builder = NetBuilder("cbrt")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
out = builder.cbrt(x)

prog = builder.build()
res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
[out])

self.cinn_outputs = [res[0]]
self.cinn_outputs = res

def test_check_results(self):
self.check_outputs_and_grads()


class TestCbrtCase1(TestCbrtOp):
def init_case(self):
self.inputs = {
"x":
np.array([0, 1, 0.01, 27, 1000000, 0.970299, 124483,
13.7396]).astype("float32")
}


class TestCbrtCase2(TestCbrtOp):
def init_case(self):
self.inputs = {
"x":
np.array([[0, 1, 0.01, 27], [1000000, 0.970299, 124483,
13.7396]]).astype("float32"),
}


class TestCbrtCase3(TestCbrtOp):
def init_case(self):
np.random.seed(0)
self.inputs = {
"x": np.random.random((32, 64)).astype("float32"),
}
class TestCbrtOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCbrtOpShape"
self.cls = TestCbrtOp
self.inputs = [
{
"shape": [10],
},
{
"shape": [8, 5],
},
{
"shape": [10, 3, 5],
},
{
"shape": [80, 40, 5, 7],
},
{
"shape": [80, 1, 5, 7],
},
{
"shape": [80, 3, 1024, 7],
},
{
"shape": [10, 5, 1024, 2048],
},
{
"shape": [1],
},
{
"shape": [512],
},
{
"shape": [1024],
},
{
"shape": [2048],
},
{
"shape": [1, 1, 1, 1],
},
]
self.dtypes = [
{
"dtype": "float32"
},
]
self.attrs = []


class TestCbrtOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCbrtOpDtype"
self.cls = TestCbrtOp
self.inputs = [
{
"shape": [1],
},
{
"shape": [5],
},
{
"shape": [80, 40, 5, 7],
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
]
self.attrs = []


if __name__ == "__main__":
unittest.main()
TestCbrtOpShape().run()
TestCbrtOpDtype().run()
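The refactor replaces the hand-written TestCbrtCase1/2/3 subclasses with TestCaseHelper-driven parametrization: each helper crosses every entry in self.inputs with every entry in self.dtypes (and self.attrs) and runs the resulting cases against self.cls. A minimal sketch of the assumed mechanism, not the actual op_test_helper source:

```python
# Assumed behavior of op_test_helper.TestCaseHelper.run(), for illustration.
import itertools
import unittest


class TestCaseHelperSketch:
    def run(self):
        self.init_attrs()  # subclasses set class_name, cls, inputs, dtypes, attrs
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        for i, (inp, dtype, attr) in enumerate(
                itertools.product(self.inputs, self.dtypes, self.attrs or [{}])):
            case = {**inp, **dtype, **attr}
            # Derive a concrete TestCase whose instances see this combo as self.case.
            test_cls = type(f"{self.class_name}_{i}", (self.cls,), {"case": case})
            suite.addTests(loader.loadTestsFromTestCase(test_cls))
        unittest.TextTestRunner().run(suite)
```

This is why setUp prints self.case and prepare_inputs reads self.case["shape"] and self.case["dtype"] instead of hardcoding arrays.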
109 changes: 91 additions & 18 deletions python/tests/ops/test_ceil_op.py
@@ -14,27 +14,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import cinn
from cinn.frontend import *
from cinn.common import *
from cinn.frontend import *
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper


@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
class TestCeilOp(OpTest):
def setUp(self):
self.init_case()
print(f"\nRunning {self.__class__.__name__}: {self.case}")
self.inputs = {}
self.prepare_inputs()

def init_case(self):
def prepare_inputs(self):
self.inputs = {
"x": np.random.random([
32,
64,
]).astype("float32") * 2 - 1
"x": self.random(self.case["shape"], self.case["dtype"], -1.0,
1.0),
}

def build_paddle_program(self, target):
Expand All @@ -47,7 +45,9 @@ def build_paddle_program(self, target):
# the forward result will be incorrect.
def build_cinn_program(self, target):
builder = NetBuilder("ceil")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
out = builder.ceil(x)

prog = builder.build()
@@ -60,12 +60,85 @@ def test_check_results(self):
self.check_outputs_and_grads()


class TestCeilCase1(TestCeilOp):
def init_case(self):
self.inputs = {
"x": np.random.random([10201, 50]).astype("float32") * 3 - 1
}
class TestCeilOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCeilOpShape"
self.cls = TestCeilOp
self.inputs = [
{
"shape": [10],
},
{
"shape": [8, 5],
},
{
"shape": [10, 3, 5],
},
{
"shape": [80, 40, 5, 7],
},
{
"shape": [80, 1, 5, 7],
},
{
"shape": [80, 3, 1024, 7],
},
{
"shape": [10, 5, 1024, 2048],
},
{
"shape": [1],
},
{
"shape": [512],
},
{
"shape": [1024],
},
{
"shape": [2048],
},
{
"shape": [1, 1, 1, 1],
},
]
self.dtypes = [
{
"dtype": "float32"
},
]
self.attrs = []


class TestCeilOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestCeilOpDtype"
self.cls = TestCeilOp
self.inputs = [
{
"shape": [1],
},
{
"shape": [5],
},
{
"shape": [80, 40, 5, 7],
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
]
self.attrs = []


if __name__ == "__main__":
unittest.main()
TestCeilOpShape().run()
TestCeilOpDtype().run()
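Both refactored tests draw inputs via self.random(shape, dtype, low, high). A plausible stand-in for that helper, assuming it samples uniformly from [low, high); the real implementation lives in op_test.py and may differ in detail:

```python
import numpy as np


# Assumed behavior of OpTest.random as called in prepare_inputs above.
def random(shape, dtype="float32", low=-1.0, high=1.0):
    return np.random.uniform(low, high, size=shape).astype(dtype)
```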