Skip to content

Commit

Permalink
Polish codes of basic benchmark class and update tensorflow version to 2.0 in ci (PaddlePaddle#405)
Browse files Browse the repository at this point in the history

* Add log and tar file to gitignore.

* Polish codes of paddle and tensorflow basic benchmark implementation, remove some unused codes.

* Update the tf version in CI and only check the modified .py file.
  • Loading branch information
Xreki authored May 15, 2020
1 parent 07639a7 commit 03c4647
Show file tree
Hide file tree
Showing 8 changed files with 198 additions and 282 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@ __MACOSX
*~
__pycache__
log*
*.log
*.swp
*.dot
*.profile
*.timeline
*.npz
*.tar
*.tar.gz
48 changes: 37 additions & 11 deletions api/common/api_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

import os
import json
import numpy as np


def parse_list(value_str, sub_dtype="int"):
Expand Down Expand Up @@ -88,31 +89,54 @@ class APIConfig(object):
def __init__(self, op_type, params=None):
    """Hold the configuration of one API benchmark case.

    Args:
        op_type (str): name of the operator under test.
        params (dict|None): raw parameter info, normally loaded from a
            json config file later via init_from_json.
    """
    self.name = op_type
    self.params = params
    # NOTE(review): variable_list/params_list are assigned twice below
    # ([] then None) — looks like a diff/merge artifact; confirm which
    # initial value is intended before relying on it.
    self.variable_list = []
    self.params_list = []
    self.variable_list = None
    self.params_list = None
    self.backward = False
    self.feed_spec = None
    # Absolute tolerance used when comparing outputs across frameworks.
    self.atol = 1e-6
    # Whether a TensorFlow counterpart should be run for this config.
    self.run_tf = True

def alias_filename(self, filename):
"""
Get the filename of alias config.
If self.name = a, self.alias_config.name = b, the filename should be "dir/a.json",
the filename of alias config will be "dir/b.json".
"""
if hasattr(self, "alias_config"):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
basename = basename.replace(self.name, self.alias_config.name)
return os.path.join(dirname, basename)
return filename

@property
def alias_name(self):
if hasattr(self, "alias_config"):
return self.alias_config.name
else:
return self.name

@property
def alias(self):
if hasattr(self, "alias_config"):
return self.alias_config
else:
return self

def init_from_json(self, filename, config_id=0):
if hasattr(self, "alias_config"):
json_file = filename
dir = os.path.dirname(json_file)
file_name = os.path.basename(json_file)
end = file_name.split('.')
filename = dir + '/' + self.alias_config.name + '.' + end[1]
self.alias_config.init_from_json(filename, config_id)
self.alias_config.init_from_json(
self.alias_filename(filename), config_id)
return self

print("---- Initialize APIConfig from %s, config_id = %d.\n" %
(filename, config_id))
with open(filename, 'r') as f:
data = json.load(f)
assert data[config_id][
"op"] == self.name, "The op type (%s) in json file is different from the name (%s). " \
op = data[config_id]["op"]
assert op == self.name or op == self.alias_name, "The op type (%s) in json file is different from the name (%s) and the alias name (%s). " \
"The filename: %s, config_id: %d." % (
data[config_id]["op"], self.name, filename, config_id)
op, self.name, self.alias_name, filename, config_id)
self.params = data[config_id]["param_info"]
if data[config_id].get("atol", None) is not None:
if isinstance(data[config_id]["atol"], str):
Expand Down Expand Up @@ -154,6 +178,8 @@ def __str__(self):
return debug_str

def _parse_params(self):
self.variable_list = []
self.params_list = []
for name, value in self.params.items():
assert value.get("type", None) is not None
if value["type"] == "Variable":
Expand Down
140 changes: 24 additions & 116 deletions api/common/paddle_api_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
def profile_context(name, use_gpu, profiler):
if profiler in ["Default", "OpDetail", "AllOpDetail"]:
profile_type = "All" if use_gpu else "CPU"
output_file = "./outputs/" + name + ".profile"
output_file = "./outputs/" + name + ".pd.profile"
with fluid.profiler.profiler(
profile_type, 'total', output_file, tracer_option=profiler):
yield
Expand Down Expand Up @@ -85,12 +85,17 @@ def __init__(self):
self.place = None
self.feed_vars = None
self.fetch_vars = None
self.feed_tensors = {}

@abc.abstractmethod
def build_program(self, config=None):
    """Build the benchmarked op's program; must be implemented by subclasses.

    `config` is presumably the APIConfig describing the op's inputs and
    parameters — confirm against concrete subclasses.
    """
    pass

def layers(self, name, **kwargs):
    """Look up ``name`` in paddle.fluid.layers and call it with ``kwargs``.

    Returns whatever the resolved layer function returns.
    """
    layers_module = importlib.import_module("paddle.fluid.layers")
    layer_fn = getattr(layers_module, name)
    return layer_fn(**kwargs)

def create_program(self):
self.main_program = fluid.Program()
self.startup_program = fluid.Program()
Expand Down Expand Up @@ -120,24 +125,28 @@ def run_with_executor(self,
executor = fluid.Executor(self.place)
executor.run(self.startup_program)

if feed is None:
feed = self._feed_random_data(use_gpu, as_lodtensor=False)
def _run_main_iter(feed=None):
outputs = executor.run(program=self.main_program,
feed=feed,
fetch_list=self.fetch_vars,
use_program_cache=True,
return_numpy=True)
return outputs

# warmup run
outputs = _run_main_iter(feed=feed)

runtimes = []
fetches = []
outputs = None
with profile_context(self.name, use_gpu, profiler):
for i in xrange(repeat):
for i in range(repeat):
begin = time.time()
outputs = executor.run(program=self.main_program,
feed=feed,
fetch_list=self.fetch_vars,
use_program_cache=True,
return_numpy=True)
end = time.time()
runtimes.append(end - begin)
outputs = _run_main_iter(feed=feed)
runtimes.append(time.time() - begin)
if check_output:
fetches.append(outputs)

if check_output:
stable, max_diff = self._check_consistency(fetches)
stats = {"total": runtimes, "stable": stable, "diff": max_diff}
Expand All @@ -147,110 +156,9 @@ def run_with_executor(self,
stats["version"] = paddle.__version__
stats["name"] = self.name
stats["device"] = "GPU" if use_gpu else "CPU"
utils.print_stat(stats, log_level=log_level)
return outputs

def run_with_core_executor(self,
                           use_gpu,
                           feed=None,
                           repeat=1,
                           log_level=0,
                           check_output=False,
                           profiler="none"):
    """Benchmark the built program with the low-level fluid.core.Executor.

    Unlike run_with_executor, this path times the feed, compute and
    fetch phases of every iteration separately.

    Args:
        use_gpu (bool): run on fluid.CUDAPlace(0) when True, else CPUPlace.
        feed (dict|None): feed data; random data is generated when None.
        repeat (int): number of timed iterations.
        log_level (int): verbosity forwarded to the stats printer.
        check_output (bool): collect every iteration's outputs and check
            they are consistent across repeats.
        profiler (str): profiler option consumed by profile_context.

    Returns:
        The outputs fetched in the last iteration (None when repeat == 0).
    """
    self.place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    executor = fluid.Executor(self.place)
    executor.run(self.startup_program)

    # Use to run main_program
    place = fluid.core.Place()
    place.set_place(self.place)
    core_executor = fluid.core.Executor(place)

    fetch_list_str = []
    for var in self.fetch_vars:
        fetch_list_str.append(var.name)
    ctx = core_executor.prepare(self.main_program.desc, 0, fetch_list_str,
                                False)
    core_executor.create_variables(self.main_program.desc, self.scope, 0)

    if feed is None:
        feed = self._feed_random_data(use_gpu, as_lodtensor=False)

    feed_times = []
    fetch_times = []
    compute_times = []
    runtimes = []
    fetches = []
    outputs = None
    with profile_context(self.name, use_gpu, profiler):
        # NOTE(review): xrange is Python 2-only and raises NameError on
        # Python 3 — sibling loops in this file use range.
        for i in xrange(repeat):
            begin = time.time()
            self._init_feed_tensor(feed)
            feed_end = time.time()
            core_executor.run_prepared_ctx(ctx, self.scope, False, False,
                                           False)
            compute_end = time.time()
            outputs = self._get_fetch_tensor()
            fetch_end = time.time()

            # Per-phase timings of this iteration.
            runtimes.append(fetch_end - begin)
            feed_times.append(feed_end - begin)
            compute_times.append(compute_end - feed_end)
            fetch_times.append(fetch_end - compute_end)

            if check_output:
                fetches.append(outputs)
    if check_output:
        stable, max_diff = self._check_consistency(fetches)
        stats = {
            "total": runtimes,
            "feed": feed_times,
            "compute": compute_times,
            "fetch": fetch_times,
            "stable": stable,
            "diff": max_diff
        }
    else:
        stats = {
            "total": runtimes,
            "feed": feed_times,
            "compute": compute_times,
            "fetch": fetch_times
        }
    stats["framework"] = "paddle"
    stats["version"] = paddle.__version__
    stats["name"] = self.name
    stats["device"] = "GPU" if use_gpu else "CPU"
    utils.print_stat(stats, log_level=log_level)
    utils.print_benchmark_result(stats, log_level=log_level)
    return outputs

def layers(self, api, **kwargs):
    """Resolve ``api`` from paddle.fluid.layers and invoke it with ``kwargs``.

    Returns the invoked layer function's result.
    """
    fluid_layers = importlib.import_module("paddle.fluid.layers")
    return getattr(fluid_layers, api)(**kwargs)

def _feed_random_data(self, use_gpu, as_lodtensor=False):
    """Create random feed data for every var in ``self.feed_vars``.

    Returns a dict mapping var name to a numpy array, or to a
    fluid LoDTensor when both ``use_gpu`` and ``as_lodtensor`` are set.
    Raises TypeError for feed vars that are not LoDTensors.
    """
    print("feed random data")
    feed = {}
    wrap_as_tensor = use_gpu and as_lodtensor
    if wrap_as_tensor:
        # Tensors are filled on CPU and copied to the device by the executor.
        place = fluid.CPUPlace()
        #place = fluid.CUDAPinnedPlace()
    for var in self.feed_vars:
        if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR:
            raise TypeError("Feed data of non LoDTensor is not supported.")

        dtype = convert_dtype(var.dtype, to_string=True)
        data = np.random.random(var.shape).astype(dtype)
        if wrap_as_tensor:
            tensor = fluid.core.LoDTensor()
            tensor.set(data, place)
            feed[var.name] = tensor
        else:
            feed[var.name] = data
    return feed

def _init_feed_tensor(self, feed):
for var in self.feed_vars:
if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR:
Expand Down Expand Up @@ -314,11 +222,11 @@ def _self_check(output):
repeat = len(fetches)
num_outputs = len(fetches[0])
max_diff = 0.0
for j in xrange(num_outputs):
for j in range(num_outputs):
if not stable:
break
output_0 = None
for i in xrange(repeat):
for i in range(repeat):
try:
output_i = _self_check(fetches[i][j])
if i == 0:
Expand Down
Loading

0 comments on commit 03c4647

Please sign in to comment.