This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

Add Op UnitTest for batchnorm #1503

Merged (22 commits) on Jun 25, 2023
Changes from 20 commits
202 changes: 110 additions & 92 deletions python/tests/ops/test_batch_norm_op.py
@@ -17,6 +17,7 @@
 import unittest, sys
 import numpy as np
 from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
 import paddle
 import cinn
 from cinn.frontend import *
@@ -27,21 +28,17 @@
                     "x86 test will be skipped due to timeout.")
 class TestBatchNormTrainOp(OpTest):
     def setUp(self):
-        self.init_case()
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.prepare_inputs()
 
-    def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
-            "x":
-            self.random([2, self.num_channels, 8, 8], "float32", 0.0, 1.0),
-            "dout":
-            self.random([2, self.num_channels, 8, 8], "float32", 1e-7, 1e-6),
-        }
+    def prepare_inputs(self):
+        self.x_np = self.random(
+            shape=self.case["x_shape"], dtype=self.case["x_dtype"])
 
     def build_paddle_program(self, target):
-        x = paddle.to_tensor(self.inputs["x"])
+        x = paddle.to_tensor(self.x_np)
         batch_norm = paddle.nn.BatchNorm(
-            self.num_channels, act=None, is_test=False)
+            self.case["x_shape"][1], act=None, is_test=False)
         out = batch_norm(x)
 
         self.paddle_outputs = [out]
@@ -51,110 +48,115 @@ def build_paddle_program(self, target):
     def build_cinn_program(self, target):
         builder = NetBuilder("batch_norm")
         x = builder.create_input(
-            self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
-        scale = builder.fill_constant([self.num_channels], 1.0, 'scale',
+            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
+            "x")
+        scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale',
                                       'float32')
-        bias = builder.fill_constant([self.num_channels], 0.0, 'bias',
+        bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias',
                                      'float32')
-        mean = builder.fill_constant([self.num_channels], 0.0, 'mean',
+        mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean',
                                      'float32')
-        variance = builder.fill_constant([self.num_channels], 1.0, 'variance',
-                                         'float32')
+        variance = builder.fill_constant([self.case["x_shape"][1]], 1.0,
+                                         'variance', 'float32')
 
         out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
 
         prog = builder.build()
         forward_res = self.get_cinn_output(
-            prog, target, [x], [self.inputs["x"]], out, passes=[])
+            prog, target, [x], [self.x_np], out, passes=[])
         self.cinn_outputs = [forward_res[0]]
 
     def test_check_results(self):
-        self.check_outputs_and_grads()
-
-
-# Reopen after decomposer infer dtype fixed
-class TestBatchNormTrainFP16(TestBatchNormTrainOp):
-    def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
-            "x": self.random([2, self.num_channels, 8, 8], "float16"),
-            "dout": self.random([2, self.num_channels, 8, 8], "float16"),
-        }
-
-    def test_check_results(self):
-        self.check_outputs_and_grads(max_relative_error=1e-3)
-
-
-class TestBatchNormTrainBF16(TestBatchNormTrainOp):
-    def init_case(self):
-        self.num_channels = 16
-        x = self.random([2, self.num_channels, 8, 8], "bfloat16")
-        dout = self.random([2, self.num_channels, 8, 8], "bfloat16")
-        self.inputs = {
-            "x": x,
-            "dout": dout,
-        }
-
-    def test_check_results(self):
-        self.check_outputs_and_grads(max_relative_error=1e-2)
+        max_relative_error = self.case[
+            "max_relative_error"] if "max_relative_error" in self.case else 1e-5
+        self.check_outputs_and_grads(max_relative_error=max_relative_error)
 
 
+class TestBatchNormTrainOpAll(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestBatchNormTrainOpCase"
+        self.cls = TestBatchNormTrainOp
+
+        self.inputs = [
+            {
+                "x_shape": [2, 16, 8, 8],
+            },
+            {
+                "x_shape": [2, 16, 8, 1],
+            },
+            {
+                "x_shape": [2, 16, 2048, 8],
+            },
+        ]
+        self.dtypes = [
+            {
+                "x_dtype": "float16",
+                "max_relative_error": 1e-3
+            },
+            {
+                "x_dtype": "float32",
+                "max_relative_error": 1e-5
+            },
+            {
+                "x_dtype": "bfloat16",
+                "max_relative_error": 1e-2
+            },
+        ]
+        self.attrs = []
+
+
 @OpTestTool.skip_if(not is_compiled_with_cuda(),
                     "x86 test will be skipped due to timeout.")
 class TestBatchNormBackwardOp(OpTest):
     def setUp(self):
-        self.init_case()
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.prepare_inputs()
 
-    def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
-            "x":
-            self.random([2, self.num_channels, 8, 8], "float32", 0.0, 10.0),
-            "dout":
-            self.random([2, self.num_channels, 8, 8], "float32", 1e-7, 1e-6),
-        }
+    def prepare_inputs(self):
+        self.x_np = self.random(
+            shape=self.case["x_shape"], dtype=self.case["x_dtype"])
+        self.y_np = self.random(
+            shape=self.case["x_shape"], dtype=self.case["x_dtype"])
 
     def build_paddle_program(self, target):
-        x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
+        x = paddle.to_tensor(self.x_np, stop_gradient=False)
         batch_norm = paddle.nn.BatchNorm(
-            self.num_channels, act=None, is_test=False)
+            self.case["x_shape"][1], act=None, is_test=False)
         out = batch_norm(x)
 
         self.paddle_outputs = [out]
-        self.paddle_grads = self.get_paddle_grads([out], [x],
-                                                  [self.inputs["dout"]])
+        self.paddle_grads = self.get_paddle_grads([out], [x], [self.y_np])
 
     # Note: If the forward and backward operators are run in the same program,
     # the forward result will be incorrect.
     def build_cinn_program(self, target):
         builder = NetBuilder("batch_norm")
         x = builder.create_input(
-            self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
-        scale = builder.fill_constant([self.num_channels], 1.0, 'scale',
+            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
+            "x")
+        scale = builder.fill_constant([self.case["x_shape"][1]], 1.0, 'scale',
                                       'float32')
-        bias = builder.fill_constant([self.num_channels], 0.0, 'bias',
+        bias = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'bias',
                                      'float32')
-        mean = builder.fill_constant([self.num_channels], 0.0, 'mean',
+        mean = builder.fill_constant([self.case["x_shape"][1]], 0.0, 'mean',
                                      'float32')
-        variance = builder.fill_constant([self.num_channels], 1.0, 'variance',
-                                         'float32')
+        variance = builder.fill_constant([self.case["x_shape"][1]], 1.0,
+                                         'variance', 'float32')
 
         out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
 
         prog = builder.build()
         forward_res = self.get_cinn_output(
-            prog, target, [x], [self.inputs["x"]], out, passes=[])
+            prog, target, [x], [self.x_np], out, passes=[])
         self.cinn_outputs = [forward_res[0]]
 
         builder_grad = NetBuilder("batch_norm_grad")
         dout = builder_grad.create_input(
-            self.nptype2cinntype(self.inputs["dout"].dtype),
-            self.inputs["dout"].shape, "dout")
+            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
+            "dout")
         x_g = builder_grad.create_input(
-            self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x_g")
+            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
+            "x_g")
         scale_g = builder_grad.fill_constant(scale.shape(), 1.0, 'scale_g',
                                              'float32')
         save_mean = builder_grad.create_input(
@@ -167,30 +169,45 @@ def build_cinn_program(self, target):
         prog = builder_grad.build()
         backward_res = self.get_cinn_output(
             prog,
-            target, [dout, x_g, save_mean, save_variance], [
-                self.inputs["dout"], self.inputs["x"], forward_res[1],
-                forward_res[2]
-            ],
+            target, [dout, x_g, save_mean, save_variance],
+            [self.y_np, self.x_np, forward_res[1], forward_res[2]],
             out_grad,
             passes=[])
         self.cinn_grads = [backward_res[0]]
 
     def test_check_results(self):
-        self.check_outputs_and_grads()
-
-
-class TestBatchNormBackwardFP16(TestBatchNormBackwardOp):
-    def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
-            "x":
-            self.random([2, self.num_channels, 8, 8], "float16", 0.0, 10.0),
-            "dout":
-            self.random([2, self.num_channels, 8, 8], "float16", 1e-7, 1e-6),
-        }
-
-    def test_check_results(self):
-        self.check_outputs_and_grads(max_relative_error=1e-3)
+        max_relative_error = self.case[
+            "max_relative_error"] if "max_relative_error" in self.case else 1e-5
+        self.check_outputs_and_grads(max_relative_error=max_relative_error)
 
 
+class TestBatchNormBackwardOpAll(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestBatchNormBackwardOpCase"
+        self.cls = TestBatchNormBackwardOp
+
+        self.inputs = [
+            {
+                "x_shape": [2, 16, 8, 8],
+            },
+            {
+                "x_shape": [2, 16, 8, 1],
+            },
+            {
+                "x_shape": [2, 16, 2048, 8],
+            },
+        ]
+        self.dtypes = [
+            {
+                "x_dtype": "float16",
+                "max_relative_error": 1e-3
+            },
+            {
+                "x_dtype": "float32",
+                "max_relative_error": 1e-5
+            },
+        ]
+        self.attrs = []
+
+
 @OpTestTool.skip_if(not is_compiled_with_cuda(),
@@ -242,4 +259,5 @@ def test_check_results(self):
 
 
 if __name__ == "__main__":
-    unittest.main()
+    TestBatchNormTrainOpAll().run()
+    TestBatchNormBackwardOpAll().run()
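
Note on the test harness: op_test_helper.TestCaseHelper is imported by this change, but its implementation is not part of the diff. As a rough mental model only (a hypothetical sketch, not the real CINN helper), such a helper expands the Cartesian product of inputs, dtypes, and attrs into one generated test class per combination, attaches the merged dict as self.case, and runs the resulting suite, which is why prepare_inputs reads self.case["x_shape"] and self.case["x_dtype"]:

import itertools
import unittest


class TestCaseHelperSketch:
    # Hypothetical stand-in for op_test_helper.TestCaseHelper; the names and
    # behavior here are assumptions, not the actual CINN implementation.
    def init_attrs(self):
        raise NotImplementedError

    def run(self):
        self.init_attrs()
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        # Every (input, dtype, attr) combination becomes one generated
        # subclass of self.cls, with the merged config stored in `case`.
        combos = itertools.product(self.inputs, self.dtypes, self.attrs or [{}])
        for i, (inp, dtype, attr) in enumerate(combos):
            case = {**inp, **dtype, **attr}
            name = f"{self.class_name}{i}"
            generated = type(name, (self.cls,), {"case": case})
            suite.addTests(loader.loadTestsFromTestCase(generated))
        unittest.TextTestRunner(verbosity=2).run(suite)

Under that assumption, TestBatchNormTrainOpAll().run() would expand to nine train cases (three shapes times three dtypes) and TestBatchNormBackwardOpAll().run() to six backward cases (three shapes times two dtypes).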