This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

Add Op UnitTest for batchnorm #1503

Merged: 22 commits, Jun 25, 2023
Changes from 12 commits
191 changes: 99 additions & 92 deletions python/tests/ops/test_batch_norm_op.py
@@ -17,6 +17,7 @@
 import unittest, sys
 import numpy as np
 from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
 import paddle
 import cinn
 from cinn.frontend import *
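The only change in this hunk is the new TestCaseHelper import, which drives the parameterized test classes added further down. As a rough sketch of the pattern (illustrative names only, not the actual op_test_helper API), the helper stamps out one subclass of the base test per input dict and exposes the dict as self.case, which is why the base class's init_case can simply read it back:

# Hypothetical sketch of the TestCaseHelper pattern; names are illustrative,
# not the real op_test_helper API.
import unittest


class CaseHelperSketch:
    def init_attrs(self):
        raise NotImplementedError  # subclasses set class_name, cls and inputs

    def build_suite(self):
        self.init_attrs()
        suite = unittest.TestSuite()
        for i, case in enumerate(self.inputs):
            # Each input dict becomes its own generated TestCase subclass;
            # the base test reads it back as `self.case`.
            test_cls = type("%s_%d" % (self.class_name, i), (self.cls,),
                            {"case": case})
            suite.addTests(
                unittest.defaultTestLoader.loadTestsFromTestCase(test_cls))
        return suite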
Expand All @@ -30,18 +31,12 @@ def setUp(self):
self.init_case()

def init_case(self):
self.num_channels = 16
self.inputs = {
"x":
self.random([2, self.num_channels, 8, 8], "float32", 0.0, 1.0),
"dout":
self.random([2, self.num_channels, 8, 8], "float32", 1e-7, 1e-6),
}
self.inputs = self.case

def build_paddle_program(self, target):
x = paddle.to_tensor(self.inputs["x"])
batch_norm = paddle.nn.BatchNorm(
self.num_channels, act=None, is_test=False)
self.inputs["num_channels"], act=None, is_test=False)
out = batch_norm(x)

self.paddle_outputs = [out]
Expand All @@ -53,14 +48,14 @@ def build_cinn_program(self, target):
x = builder.create_input(
self.nptype2cinntype(self.inputs["x"].dtype),
self.inputs["x"].shape, "x")
scale = builder.fill_constant([self.num_channels], 1.0, 'scale',
'float32')
bias = builder.fill_constant([self.num_channels], 0.0, 'bias',
'float32')
mean = builder.fill_constant([self.num_channels], 0.0, 'mean',
'float32')
variance = builder.fill_constant([self.num_channels], 1.0, 'variance',
'float32')
scale = builder.fill_constant([self.inputs["num_channels"]], 1.0,
'scale', "float32")
bias = builder.fill_constant([self.inputs["num_channels"]], 0.0,
'bias', "float32")
mean = builder.fill_constant([self.inputs["num_channels"]], 0.0,
'mean', "float32")
variance = builder.fill_constant([self.inputs["num_channels"]], 1.0,
'variance', "float32")

out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)

Expand All @@ -70,20 +65,25 @@ def build_cinn_program(self, target):
self.cinn_outputs = [forward_res[0]]

def test_check_results(self):
self.check_outputs_and_grads()
self.check_outputs_and_grads(max_relative_error=1e-3)


# Reopen after decomposer infer dtype fixed
class TestBatchNormTrainFP16(TestBatchNormTrainOp):
def init_case(self):
self.num_channels = 16
self.inputs = {
"x": self.random([2, self.num_channels, 8, 8], "float16"),
"dout": self.random([2, self.num_channels, 8, 8], "float16"),
}
class TestBatchNormTrainOpAll(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestBatchNormTrainOpBase"
self.cls = TestBatchNormTrainOp

def test_check_results(self):
self.check_outputs_and_grads(max_relative_error=1e-3)
self.inputs = []
for x_shape in [[2, 16, 8, 8], [2, 16, 8, 1], [2, 16, 2048, 8]]:
for x_type in ["float16", "float32"]:
self.inputs.append({
Liyulingyue marked this conversation as resolved.
Show resolved Hide resolved
"x":
self.random(x_shape, x_type, 0.0, 1.0),
"dout":
self.random(x_shape, x_type, 1e-7, 1e-6),
"num_channels":
x_shape[1]
})


class TestBatchNormTrainBF16(TestBatchNormTrainOp):
@@ -107,87 +107,94 @@ def setUp(self):
         self.init_case()
 
     def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
+        self.inputs = [{
             "x":
-            self.random([2, self.num_channels, 8, 8], "float32", 0.0, 10.0),
+            self.random([2, 16, 8, 8], "float32", 0.0, 10.0),
             "dout":
-            self.random([2, self.num_channels, 8, 8], "float32", 1e-7, 1e-6),
-        }
+            self.random([2, 16, 8, 8], "float32", 1e-7, 1e-6),
+            "num_channels":
+            16
+        }]
 
     def build_paddle_program(self, target):
-        x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
-        batch_norm = paddle.nn.BatchNorm(
-            self.num_channels, act=None, is_test=False)
-        out = batch_norm(x)
+        for inputs in self.inputs:
+            x = paddle.to_tensor(inputs["x"], stop_gradient=False)
+            batch_norm = paddle.nn.BatchNorm(
+                inputs["num_channels"], act=None, is_test=False)
+            out = batch_norm(x)
 
-        self.paddle_outputs = [out]
-        self.paddle_grads = self.get_paddle_grads([out], [x],
-                                                  [self.inputs["dout"]])
+            self.paddle_outputs.append(out)
+            grad = self.get_paddle_grads([out], [x], [inputs["dout"]])
+            self.paddle_grads.append(grad[0])
 
     # Note: If the forward and backward operators are run in the same program,
    # the forward result will be incorrect.
     def build_cinn_program(self, target):
-        builder = NetBuilder("batch_norm")
-        x = builder.create_input(
-            self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x")
-        scale = builder.fill_constant([self.num_channels], 1.0, 'scale',
-                                      'float32')
-        bias = builder.fill_constant([self.num_channels], 0.0, 'bias',
-                                     'float32')
-        mean = builder.fill_constant([self.num_channels], 0.0, 'mean',
-                                     'float32')
-        variance = builder.fill_constant([self.num_channels], 1.0, 'variance',
-                                         'float32')
-
-        out = builder.batchnorm(x, scale, bias, mean, variance, is_test=False)
-
-        prog = builder.build()
-        forward_res = self.get_cinn_output(
-            prog, target, [x], [self.inputs["x"]], out, passes=[])
-        self.cinn_outputs = [forward_res[0]]
-
-        builder_grad = NetBuilder("batch_norm_grad")
-        dout = builder_grad.create_input(
-            self.nptype2cinntype(self.inputs["dout"].dtype),
-            self.inputs["dout"].shape, "dout")
-        x_g = builder_grad.create_input(
-            self.nptype2cinntype(self.inputs["x"].dtype),
-            self.inputs["x"].shape, "x_g")
-        scale_g = builder_grad.fill_constant(scale.shape(), 1.0, 'scale_g',
-                                             'float32')
-        save_mean = builder_grad.create_input(
-            self.nptype2cinntype('float32'), out[1].shape(), "save_mean")
-        save_variance = builder_grad.create_input(
-            self.nptype2cinntype('float32'), out[2].shape(), "save_variance")
-
-        out_grad = builder_grad.batch_norm_grad(dout, x_g, scale_g, save_mean,
-                                                save_variance)
-        prog = builder_grad.build()
-        backward_res = self.get_cinn_output(
-            prog,
-            target, [dout, x_g, save_mean, save_variance], [
-                self.inputs["dout"], self.inputs["x"], forward_res[1],
-                forward_res[2]
-            ],
-            out_grad,
-            passes=[])
-        self.cinn_grads = [backward_res[0]]
+        for inputs in self.inputs:
+            builder = NetBuilder("batch_norm")
+            x = builder.create_input(
+                self.nptype2cinntype(inputs["x"].dtype), inputs["x"].shape,
+                "x")
+            scale = builder.fill_constant([inputs["num_channels"]], 1.0,
+                                          'scale', 'float32')
+            bias = builder.fill_constant([inputs["num_channels"]], 0.0, 'bias',
+                                         'float32')
+            mean = builder.fill_constant([inputs["num_channels"]], 0.0, 'mean',
+                                         'float32')
+            variance = builder.fill_constant([inputs["num_channels"]], 1.0,
+                                             'variance', 'float32')
+
+            out = builder.batchnorm(
+                x, scale, bias, mean, variance, is_test=False)
+
+            prog = builder.build()
+            forward_res = self.get_cinn_output(
+                prog, target, [x], [inputs["x"]], out, passes=[])
+            self.cinn_outputs.append(forward_res[0])
+
+            builder_grad = NetBuilder("batch_norm_grad")
+            dout = builder_grad.create_input(
+                self.nptype2cinntype(inputs["dout"].dtype),
+                inputs["dout"].shape, "dout")
+            x_g = builder_grad.create_input(
+                self.nptype2cinntype(inputs["x"].dtype), inputs["x"].shape,
+                "x_g")
+            scale_g = builder_grad.fill_constant(scale.shape(), 1.0, 'scale_g',
+                                                 'float32')
+            save_mean = builder_grad.create_input(
+                self.nptype2cinntype('float32'), out[1].shape(), "save_mean")
+            save_variance = builder_grad.create_input(
+                self.nptype2cinntype('float32'), out[2].shape(),
+                "save_variance")
+
+            out_grad = builder_grad.batch_norm_grad(dout, x_g, scale_g,
+                                                    save_mean, save_variance)
+            prog = builder_grad.build()
+            backward_res = self.get_cinn_output(
+                prog,
+                target, [dout, x_g, save_mean, save_variance],
+                [inputs["dout"], inputs["x"], forward_res[1], forward_res[2]],
+                out_grad,
+                passes=[])
+            self.cinn_grads.append(backward_res[0])
 
     def test_check_results(self):
         self.check_outputs_and_grads()
 
 
-class TestBatchNormBackwardFP16(TestBatchNormBackwardOp):
+class TestBatchNormBackwardAll(TestBatchNormBackwardOp):
     def init_case(self):
-        self.num_channels = 16
-        self.inputs = {
-            "x":
-            self.random([2, self.num_channels, 8, 8], "float16", 0.0, 10.0),
-            "dout":
-            self.random([2, self.num_channels, 8, 8], "float16", 1e-7, 1e-6),
-        }
+        self.inputs = []
+        for x_shape in [[2, 16, 8, 8], [2, 16, 8, 1], [2, 16, 2048, 8]]:
+            for x_type in ["float16", "float32"]:
+                self.inputs.append({
+                    "x":
+                    self.random(x_shape, x_type, 0.0, 1.0),
+                    "dout":
+                    self.random(x_shape, x_type, 1e-7, 1e-6),
+                    "num_channels":
+                    x_shape[1]
+                })
 
     def test_check_results(self):
         self.check_outputs_and_grads(max_relative_error=1e-3)
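For reference, the x-gradient the backward test checks can be written in a few lines of numpy. This is a sketch under the test's own assumptions (scale fixed at 1.0, NCHW layout, per-channel statistics; the epsilon is again assumed rather than set by the test):

import numpy as np


def batch_norm_grad_x_ref(dout, x, eps=1e-5):
    # Gradient of training-mode batch norm w.r.t. x with scale == 1, so
    # d(x_hat) == dout. Statistics are per channel over the N, H, W axes.
    axes = (0, 2, 3)
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return (dout - dout.mean(axis=axes, keepdims=True) -
            x_hat * (dout * x_hat).mean(axis=axes, keepdims=True)) / np.sqrt(
                var + eps)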
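One way to read the looser max_relative_error=1e-3 on the mixed float16/float32 case lists: float16 carries only 10 mantissa bits, so a single rounding step already costs a relative error of up to roughly 5e-4. A quick illustration:

import numpy as np

x = np.float32(1.2345678)
rel_err = abs(np.float32(np.float16(x)) - x) / x
print(rel_err)  # on the order of 1e-4; float16 spacing near 1.0 is 2**-10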