From 03c464798a915e6801cc41ea2a9359dd3992e98c Mon Sep 17 00:00:00 2001
From: Yiqun Liu
Date: Fri, 15 May 2020 16:41:37 +0800
Subject: [PATCH] Polish codes of basic benchmark class and update tensorflow version to 2.0 in ci (#405)

* Add log and tar file to gitignore.

* Polish codes of paddle and tensorflow basic benchmark implementation, remove some unused codes.

* Update the tf version in CI and only check the modified .py file.
---
 .gitignore                             |   3 +
 api/common/api_param.py                |  48 +++++++--
 api/common/paddle_api_benchmark.py     | 140 +++++--------------------
 api/common/tensorflow_api_benchmark.py | 134 ++++++++++++-----------
 api/common/utils.py                    |  77 ++++----------
 api/tests/main.py                      |  66 ++++++------
 api/tests/run.sh                       |   7 +-
 scripts/run_test.sh                    |   5 +-
 8 files changed, 198 insertions(+), 282 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1a594f4f8d..33ccc3f533 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,11 @@ __MACOSX
 *~
 __pycache__
 log*
+*.log
 *.swp
 *.dot
 *.profile
 *.timeline
 *.npz
+*.tar
+*.tar.gz
diff --git a/api/common/api_param.py b/api/common/api_param.py
index 2d333cfd02..5e3525e3e7 100644
--- a/api/common/api_param.py
+++ b/api/common/api_param.py
@@ -14,6 +14,7 @@
 
 import os
 import json
+import numpy as np
 
 
 def parse_list(value_str, sub_dtype="int"):
@@ -88,31 +89,54 @@ class APIConfig(object):
     def __init__(self, op_type, params=None):
         self.name = op_type
         self.params = params
-        self.variable_list = []
-        self.params_list = []
+        self.variable_list = None
+        self.params_list = None
         self.backward = False
         self.feed_spec = None
         self.atol = 1e-6
         self.run_tf = True
 
+    def alias_filename(self, filename):
+        """
+        Get the filename of the alias config.
+        If self.name is a, self.alias_config.name is b and the filename is "dir/a.json",
+        the filename of the alias config will be "dir/b.json".
+        """
+        if hasattr(self, "alias_config"):
+            dirname = os.path.dirname(filename)
+            basename = os.path.basename(filename)
+            basename = basename.replace(self.name, self.alias_config.name)
+            return os.path.join(dirname, basename)
+        return filename
+
+    @property
+    def alias_name(self):
+        if hasattr(self, "alias_config"):
+            return self.alias_config.name
+        else:
+            return self.name
+
+    @property
+    def alias(self):
+        if hasattr(self, "alias_config"):
+            return self.alias_config
+        else:
+            return self
+
     def init_from_json(self, filename, config_id=0):
         if hasattr(self, "alias_config"):
-            json_file = filename
-            dir = os.path.dirname(json_file)
-            file_name = os.path.basename(json_file)
-            end = file_name.split('.')
-            filename = dir + '/' + self.alias_config.name + '.' + end[1]
-            self.alias_config.init_from_json(filename, config_id)
+            self.alias_config.init_from_json(
+                self.alias_filename(filename), config_id)
             return self
         print("---- Initialize APIConfig from %s, config_id = %d.\n" %
               (filename, config_id))
         with open(filename, 'r') as f:
             data = json.load(f)
-            assert data[config_id][
-                "op"] == self.name, "The op type (%s) in json file is different from the name (%s). " \
+            op = data[config_id]["op"]
+            assert op == self.name or op == self.alias_name, "The op type (%s) in json file is different from the name (%s) and the alias name (%s). " \
                                 "The filename: %s, config_id: %d." % (
-                                    data[config_id]["op"], self.name, filename, config_id)
+                op, self.name, self.alias_name, filename, config_id)
             self.params = data[config_id]["param_info"]
             if data[config_id].get("atol", None) is not None:
                 if isinstance(data[config_id]["atol"], str):
@@ -154,6 +178,8 @@ def __str__(self):
         return debug_str
 
     def _parse_params(self):
+        self.variable_list = []
+        self.params_list = []
         for name, value in self.params.items():
             assert value.get("type", None) is not None
             if value["type"] == "Variable":
diff --git a/api/common/paddle_api_benchmark.py b/api/common/paddle_api_benchmark.py
index 2816d729fe..89ddd5ede4 100644
--- a/api/common/paddle_api_benchmark.py
+++ b/api/common/paddle_api_benchmark.py
@@ -34,7 +34,7 @@ def profile_context(name, use_gpu, profiler):
     if profiler in ["Default", "OpDetail", "AllOpDetail"]:
         profile_type = "All" if use_gpu else "CPU"
-        output_file = "./outputs/" + name + ".profile"
+        output_file = "./outputs/" + name + ".pd.profile"
         with fluid.profiler.profiler(
                 profile_type, 'total', output_file, tracer_option=profiler):
             yield
@@ -85,12 +85,17 @@ def __init__(self):
         self.place = None
         self.feed_vars = None
         self.fetch_vars = None
-        self.feed_tensors = {}
 
     @abc.abstractmethod
     def build_program(self, config=None):
         pass
 
+    def layers(self, name, **kwargs):
+        module = importlib.import_module("paddle.fluid.layers")
+        func = getattr(module, name)
+        result = func(**kwargs)
+        return result
+
     def create_program(self):
         self.main_program = fluid.Program()
         self.startup_program = fluid.Program()
@@ -120,24 +125,28 @@ def run_with_executor(self,
         executor = fluid.Executor(self.place)
         executor.run(self.startup_program)
 
-        if feed is None:
-            feed = self._feed_random_data(use_gpu, as_lodtensor=False)
+        def _run_main_iter(feed=None):
+            outputs = executor.run(program=self.main_program,
+                                   feed=feed,
+                                   fetch_list=self.fetch_vars,
+                                   use_program_cache=True,
+                                   return_numpy=True)
+            return outputs
+
+        # warmup run
+        outputs = _run_main_iter(feed=feed)
 
         runtimes = []
         fetches = []
         outputs = None
         with profile_context(self.name, use_gpu, profiler):
-            for i in xrange(repeat):
+            for i in range(repeat):
                 begin = time.time()
-                outputs = executor.run(program=self.main_program,
-                                       feed=feed,
-                                       fetch_list=self.fetch_vars,
-                                       use_program_cache=True,
-                                       return_numpy=True)
-                end = time.time()
-                runtimes.append(end - begin)
+                outputs = _run_main_iter(feed=feed)
+                runtimes.append(time.time() - begin)
                 if check_output:
                     fetches.append(outputs)
+
         if check_output:
             stable, max_diff = self._check_consistency(fetches)
             stats = {"total": runtimes, "stable": stable, "diff": max_diff}
@@ -147,110 +156,9 @@ def run_with_executor(self,
         stats["version"] = paddle.__version__
         stats["name"] = self.name
         stats["device"] = "GPU" if use_gpu else "CPU"
-        utils.print_stat(stats, log_level=log_level)
-        return outputs
-
-    def run_with_core_executor(self,
-                               use_gpu,
-                               feed=None,
-                               repeat=1,
-                               log_level=0,
-                               check_output=False,
-                               profiler="none"):
-        self.place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
-        executor = fluid.Executor(self.place)
-        executor.run(self.startup_program)
-
-        # Use to run main_program
-        place = fluid.core.Place()
-        place.set_place(self.place)
-        core_executor = fluid.core.Executor(place)
-
-        fetch_list_str = []
-        for var in self.fetch_vars:
-            fetch_list_str.append(var.name)
-        ctx = core_executor.prepare(self.main_program.desc, 0, fetch_list_str,
-                                    False)
-        core_executor.create_variables(self.main_program.desc, self.scope, 0)
-
-        if feed is None:
-            feed = self._feed_random_data(use_gpu, as_lodtensor=False)
-
-        feed_times = []
-        fetch_times = []
-        compute_times = []
-        runtimes = []
-        fetches = []
-        outputs = None
-        with profile_context(self.name, use_gpu, profiler):
-            for i in xrange(repeat):
-                begin = time.time()
-                self._init_feed_tensor(feed)
-                feed_end = time.time()
-                core_executor.run_prepared_ctx(ctx, self.scope, False, False,
-                                               False)
-                compute_end = time.time()
-                outputs = self._get_fetch_tensor()
-                fetch_end = time.time()
-
-                runtimes.append(fetch_end - begin)
-                feed_times.append(feed_end - begin)
-                compute_times.append(compute_end - feed_end)
-                fetch_times.append(fetch_end - compute_end)
-
-                if check_output:
-                    fetches.append(outputs)
-        if check_output:
-            stable, max_diff = self._check_consistency(fetches)
-            stats = {
-                "total": runtimes,
-                "feed": feed_times,
-                "compute": compute_times,
-                "fetch": fetch_times,
-                "stable": stable,
-                "diff": max_diff
-            }
-        else:
-            stats = {
-                "total": runtimes,
-                "feed": feed_times,
-                "compute": compute_times,
-                "fetch": fetch_times
-            }
-        stats["framework"] = "paddle"
-        stats["version"] = paddle.__version__
-        stats["name"] = self.name
-        stats["device"] = "GPU" if use_gpu else "CPU"
-        utils.print_stat(stats, log_level=log_level)
+        utils.print_benchmark_result(stats, log_level=log_level)
         return outputs
 
-    def layers(self, api, **kwargs):
-        module = importlib.import_module("paddle.fluid.layers")
-        api_paddle = getattr(module, api)
-        result = api_paddle(**kwargs)
-        return result
-
-    def _feed_random_data(self, use_gpu, as_lodtensor=False):
-        print("feed random data")
-        feed = {}
-        if use_gpu and as_lodtensor:
-            place = fluid.CPUPlace()
-            #place = fluid.CUDAPinnedPlace()
-        for var in self.feed_vars:
-            if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR:
-                raise TypeError("Feed data of non LoDTensor is not supported.")
-
-            shape = var.shape
-            dtype = convert_dtype(var.dtype, to_string=True)
-            data = np.random.random(shape).astype(dtype)
-            if use_gpu and as_lodtensor:
-                tensor = fluid.core.LoDTensor()
-                tensor.set(data, place)
-                feed[var.name] = tensor
-            else:
-                feed[var.name] = data
-        return feed
-
     def _init_feed_tensor(self, feed):
         for var in self.feed_vars:
             if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR:
@@ -314,11 +222,11 @@ def _self_check(output):
         repeat = len(fetches)
         num_outputs = len(fetches[0])
         max_diff = 0.0
-        for j in xrange(num_outputs):
+        for j in range(num_outputs):
             if not stable:
                 break
             output_0 = None
-            for i in xrange(repeat):
+            for i in range(repeat):
                 try:
                     output_i = _self_check(fetches[i][j])
                     if i == 0:
diff --git a/api/common/tensorflow_api_benchmark.py b/api/common/tensorflow_api_benchmark.py
index 68314bc5a8..2064c13fef 100644
--- a/api/common/tensorflow_api_benchmark.py
+++ b/api/common/tensorflow_api_benchmark.py
@@ -33,6 +33,49 @@
         "Cannot import tensorflow, maybe tensorflow is not installed.\n")
 
 
+class Profiler(object):
+    def __init__(self, name, sess, profile):
+        self.name = name
+        self.sess = sess
+        self.profile = profile
+        self.profiler = None
+        self.run_options = None
+        self.run_metadata = None
+        self.generate_timeline = False
+
+    def __enter__(self):
+        if self.profile:
+            self.profiler = model_analyzer.Profiler(graph=self.sess.graph)
+            if tf.__version__ < "1.15.0":
+                self.run_options = tf.RunOptions(
+                    trace_level=tf.RunOptions.FULL_TRACE)
+                self.run_metadata = tf.RunMetadata()
+            else:
+                self.run_options = tf.compat.v1.RunOptions(
+                    trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
+                self.run_metadata = tf.compat.v1.RunMetadata()
+        return self
+
+    def add_step(self, step):
+        if self.profile:
+            # Update profiler
+            self.profiler.add_step(step=step, run_meta=self.run_metadata)
+            if self.generate_timeline:
+                # For timeline
+                tl = timeline.Timeline(self.run_metadata.step_stats)
+                chrome_trace = tl.generate_chrome_trace_format()
+                trace_file = open(self.name + '.tf.timeline', 'w')
+                trace_file.write(chrome_trace)
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        if self.profile:
+            # Generate profiling result
+            profile_op_builder = option_builder.ProfileOptionBuilder().select(
+                ['micros', 'occurrence']).order_by('micros').with_max_depth(5)
+            self.profiler.profile_operations(profile_op_builder.build())
+        return self
+
+
 def convert_dtype(dtype, to_string=True):
     def _trans(to_string, dtype_str, np_dtype):
         dtype = dtype_str if to_string else np.dtype(np_dtype)
@@ -120,6 +163,12 @@ def placeholder(self, name, shape, dtype):
             var = tf.placeholder(name=name, shape=shape, dtype=tf_dtype)
         return var
 
+    def layers(self, name, **kwargs):
+        module = importlib.import_module("tensorflow")
+        func = getattr(module, name)
+        result = func(**kwargs)
+        return result
+
     def append_gradients(self, targets, inputs):
         if isinstance(inputs, tf.Tensor):
             inputs = [inputs]
@@ -142,62 +191,34 @@ def run(self,
             check_output=False,
             profile=False):
         sess = self._init_session(use_gpu)
-        #tf.debugging.set_log_device_placement(True)
-        if profile:
-            profiler = model_analyzer.Profiler(graph=sess.graph)
-            run_options = config_pb2.RunOptions(
-                trace_level=config_pb2.RunOptions.FULL_TRACE)
-            run_metadata = config_pb2.RunMetadata()
-        else:
-            profiler = None
-            run_options = None
-            run_metadata = None
-        self.timeline_dict = None
-
-        if feed is None:
-            feed = self._feed_random_data()
+        #tf.debugging.set_log_device_placement(True)
 
-        runtimes = []
-        fetches = []
-        outputs = None
-        for i in range(repeat):
-            begin = time.time()
+        def _run_main_iter(feed=feed, run_options=None, run_metadata=None):
             outputs = sess.run(fetches=self.fetch_list,
                                feed_dict=feed,
                                options=run_options,
                                run_metadata=run_metadata)
-            end = time.time()
-            runtimes.append(end - begin)
+            return outputs
 
-            if profile:
-                # Update profiler
-                profiler.add_step(step=i, run_meta=run_metadata)
-                # For timeline
-                tl = timeline.Timeline(run_metadata.step_stats)
-                chrome_trace = tl.generate_chrome_trace_format()
-                trace_file = open(self.name + '_tf.timeline', 'w')
-                trace_file.write(chrome_trace)
-                #self._update_timeline(chrome_trace)
+        # warmup run
+        _run_main_iter(feed=feed, run_options=None, run_metadata=None)
 
-            if check_output:
-                fetches.append(outputs)
-        if profile:
-            # Generate profiling result
-            profile_op_builder = option_builder.ProfileOptionBuilder()
-            profile_op_builder.select(['micros', 'occurrence'])
-            profile_op_builder.order_by('micros')
-            profile_op_builder.with_max_depth(10)
-            profiler.profile_operations(profile_op_builder.build())
-            # Generate timeline
-            # profile_graph_builder = option_builder.ProfileOptionBuilder(
-            #     option_builder.ProfileOptionBuilder.time_and_memory())
-            # profile_graph_builder.with_timeline_output(timeline_file=self.name + '_tf.timeline')
-            # profile_graph_builder.with_step(10)
-            # profiler.profile_graph(profile_graph_builder.build())
-            #tl_output_file = self.name + "_tf.timeline"
-            #with open(tl_output_file, 'w') as f:
-            #    json.dump(self.timeline_dict, f)
+        runtimes = []
+        fetches = []
+        outputs = None
+        with Profiler(self.name, sess, profile) as prof:
+            for i in range(repeat):
+                begin = time.time()
+                outputs = _run_main_iter(
+                    feed=feed,
+                    run_options=prof.run_options,
+                    run_metadata=prof.run_metadata)
+                runtimes.append(time.time() - begin)
+                prof.add_step(step=i)
+
+                if check_output:
+                    fetches.append(outputs)
 
         stats = {
             "framework": "tensorflow",
@@ -206,15 +227,9 @@ def run(self,
             "total": runtimes
         }
         stats["device"] = "GPU" if use_gpu else "CPU"
-        utils.print_stat(stats, log_level=log_level)
+        utils.print_benchmark_result(stats, log_level=log_level)
         return outputs
 
-    def layers(self, api, **kwargs):
-        module = importlib.import_module("tensorflow")
-        api_tf = getattr(module, api)
-        result = api_tf(**kwargs)
-        return result
-
     def _init_session(self, use_gpu):
         if tf.__version__ >= "1.15.0":
             config = tf.compat.v1.ConfigProto()
@@ -227,12 +242,3 @@ def _init_session(self, use_gpu):
             sess.run(tf.global_variables_initializer())
             sess.run(tf.local_variables_initializer())
         return sess
-
-    def _feed_random_data(self):
-        print("feed random data")
-        feed = {}
-        for var in self.feed_list:
-            shape = var.shape
-            dtype = self.convert_dtype(var.dtype, to_string=True)
-            feed[var] = np.random.random(shape).astype(dtype)
-        return feed
diff --git a/api/common/utils.py b/api/common/utils.py
index 2a1170a582..36f29aa46c 100644
--- a/api/common/utils.py
+++ b/api/common/utils.py
@@ -39,7 +39,7 @@ def compare(output1, output2, atol):
     return max_diff, offset
 
 
-def check_outputs(list1, list2, name=None, atol=1e-6):
+def check_outputs(list1, list2, name, atol=1e-6):
     if not isinstance(list1, list) or not isinstance(list2, list):
         raise TypeError(
             "input argument's type should be list of numpy.ndarray.")
@@ -66,8 +66,7 @@ def check_outputs(list1, list2, name=None, atol=1e-6):
                 consistent = False
 
     status = collections.OrderedDict()
-    if name is not None:
-        status["name"] = name
+    status["name"] = name
     status["consistent"] = consistent
     status["num_outputs"] = num_outputs
     status["diff"] = max_diff.astype("float")
@@ -85,44 +84,16 @@ def check_outputs(list1, list2, name=None, atol=1e-6):
     print(json.dumps(status))
 
 
-def get_stat(stats, key):
-    if stats.get(key, None) is None:
-        value = None
-    else:
-        value = stats[key]
-    return value
-
-
-def calc_avg_time(times, begin, end):
-    if times is not None:
-        if not isinstance(times, list):
-            raise TypeError("Input times should be a list.")
-        sorted_times = np.sort(times)
-        avg_time = np.average(sorted_times[begin:end])
-    else:
-        avg_time = 0.0
-    return avg_time
-
+def print_benchmark_result(result, log_level=0):
+    if not isinstance(result, dict):
+        raise TypeError("Input result should be a dict.")
 
-def print_stat(stats, log_level=0):
-    if not isinstance(stats, dict):
-        raise TypeError("Input stats should be a dict.")
+    runtimes = result.get("total", None)
+    stable = result.get("stable", None)
+    diff = result.get("diff", None)
 
-    runtimes = stats["total"]
-    feed_times = get_stat(stats, "feed")
-    fetch_times = get_stat(stats, "fetch")
-    compute_times = get_stat(stats, "compute")
-    stable = get_stat(stats, "stable")
-    diff = get_stat(stats, "diff")
-
-    for i in xrange(len(runtimes)):
+    for i in range(len(runtimes)):
         runtimes[i] *= 1000
-        if feed_times is not None:
-            feed_times[i] *= 1000
-        if fetch_times is not None:
-            fetch_times[i] *= 1000
-        if compute_times is not None:
-            compute_times[i] *= 1000
 
     sorted_runtimes = np.sort(runtimes)
     if len(sorted_runtimes) <= 2:
@@ -139,10 +110,6 @@ def print_stat(stats, log_level=0):
         end = len(sorted_runtimes) - 10
     avg_runtime = np.average(sorted_runtimes[begin:end])
 
-    avg_feed_time = calc_avg_time(feed_times, begin, end)
-    avg_fetch_time = calc_avg_time(fetch_times, begin, end)
-    avg_compute_time = calc_avg_time(compute_times, begin, end)
-
    if log_level == 0:
        seg_0 = 0
        seg_1 = len(runtimes)
@@ -158,16 +125,18 @@ def print_stat(stats, log_level=0):
             print("Iter {0}, Runtime: {1}".format("%4d" % i,
                                                   "%.5f ms" % runtimes[i]))
 
-    print("{")
-    print(" framework: \"%s\"," % stats["framework"])
-    print(" version: \"%s\"," % stats["version"])
-    print(" name: \"%s\"," % stats["name"])
-    print(" device: \"%s\"," % stats["device"])
+    status = collections.OrderedDict()
+    status["framework"] = result["framework"]
+    status["version"] = result["version"]
+    status["name"] = result["name"]
+    status["device"] = result["device"]
     if stable is not None and diff is not None:
-        print(" precision: { stable: \"%s\", diff: %.5f }," %
-              (str(stable), diff))
-    print(
-        " speed: { repeat: %d, start: %d, end: %d, total: %.5f, feed: %.5f, compute: %.5f, fetch: %.5f }"
-        % (len(sorted_runtimes), begin, end, avg_runtime, avg_feed_time,
-           avg_compute_time, avg_fetch_time))
-    print("}")
+        status["precision"] = collections.OrderedDict()
+        status["precision"]["stable"] = stable
+        status["precision"]["diff"] = diff
+    status["speed"] = collections.OrderedDict()
+    status["speed"]["repeat"] = len(sorted_runtimes)
+    status["speed"]["begin"] = begin
+    status["speed"]["end"] = end
+    status["speed"]["total"] = avg_runtime
+    print(json.dumps(status))
diff --git a/api/tests/main.py b/api/tests/main.py
index 0bfc962d06..7189ac5373 100644
--- a/api/tests/main.py
+++ b/api/tests/main.py
@@ -58,11 +58,6 @@ def parse_args():
         default=None,
         help='Only import params of API from json file in the specified position [0|1|...]'
     )
-    parser.add_argument(
-        '--run_with_executor',
-        type=str2bool,
-        default=True,
-        help='Whether running with executor [True|False]')
     parser.add_argument(
         '--check_output',
         type=str2bool,
         default=False,
@@ -182,13 +177,14 @@ def copy_feed_spec(config=None):
 
 
 def test_main(pd_obj=None, tf_obj=None, config=None):
-    if config is None:
-        raise ValueError("API config must be set.")
+    assert config is not None, "API config must be set."
 
     args = parse_args()
     if args.json_file is not None:
+        # Set the filename to the alias config's filename when there is an alias config.
+        filename = config.alias_filename(args.json_file)
         if args.config_id is not None and args.config_id >= 0:
-            config.init_from_json(args.json_file, args.config_id)
+            config.init_from_json(filename, args.config_id)
             if args.api_name != None:
                 API_s = args.api_name.split(',')
                 for api in API_s:
@@ -198,14 +194,6 @@ def test_main(pd_obj=None, tf_obj=None, config=None):
                 test_main_without_json(pd_obj, tf_obj, config)
         else:
             num_configs = 0
-            if hasattr(config, "alias_config"):
-                json_file = args.json_file
-                dir = os.path.dirname(json_file)
-                file_name = os.path.basename(json_file)
-                end = file_name.split('.')
-                filename = dir + '/' + config.alias_config.name + '.' + end[1]
-            else:
-                filename = args.json_file
             with open(filename, 'r') as f:
                 num_configs = len(json.load(f))
             for config_id in range(0, num_configs):
@@ -221,17 +209,30 @@ def test_main(pd_obj=None, tf_obj=None, config=None):
                 test_main_without_json(pd_obj, tf_obj, config)
 
 
+def _is_paddle_enabled(args, config):
+    if args.task == "accuracy" or args.framework in ["paddle", "both"]:
+        return True
+    return False
+
+
+def _is_tensorflow_enabled(args, config):
+    if config.run_tf:
+        if args.task == "accuracy" or args.framework in [
+                "tensorflow", "tf", "both"
+        ]:
+            return True
+    return False
+
+
 def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
-    if config is None:
-        raise ValueError("API config must be set.")
+    assert config is not None, "API config must be set."
 
     args = parse_args()
     config.backward = args.backward
     feed_spec = copy_feed_spec(config)
 
     feed_list = None
-    if args.task == "accuracy" or args.framework in ["paddle", "both"]:
-        if pd_obj is None:
-            raise ValueError("Paddle object is None.")
+    if _is_paddle_enabled(args, config):
+        assert pd_obj is not None, "Paddle object is None."
         print(config)
         pd_obj.name = config.name
         pd_obj.create_program()
@@ -239,24 +240,21 @@ def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
             feed_list = feeder.feed_paddle(pd_obj, feed_spec=feed_spec)
         pd_outputs = run_paddle(args.task, pd_obj, args, feed_list)
 
-    if args.task == "accuracy" or args.framework in [
-            "tensorflow", "tf", "both"
-    ]:
-        if tf_obj is None:
-            raise ValueError("TensorFlow object is None.")
+    if _is_tensorflow_enabled(args, config):
+        assert tf_obj is not None, "TensorFlow object is None."
         tf_config = config.to_tensorflow()
         print(tf_config)
         warnings.simplefilter('always', UserWarning)
-        if tf_config.run_tf:
-            tf_obj.name = tf_config.name
-            tf_obj.build_graph(config=tf_config)
-            feed_list = feeder.feed_tensorflow(
-                tf_obj, feed_list, feed_spec=feed_spec)
-            tf_outputs = run_tensorflow(args.task, tf_obj, args, feed_list)
-        else:
-            warnings.warn("This config is not supported by TensorFlow.")
+        tf_obj.name = tf_config.name
+        tf_obj.build_graph(config=tf_config)
+        feed_list = feeder.feed_tensorflow(
+            tf_obj, feed_list, feed_spec=feed_spec)
+        tf_outputs = run_tensorflow(args.task, tf_obj, args, feed_list)
 
     if args.task == "accuracy":
         if tf_config.run_tf:
             utils.check_outputs(
                 pd_outputs, tf_outputs, name=pd_obj.name, atol=config.atol)
+        else:
+            warnings.simplefilter('always', UserWarning)
+            warnings.warn("This config is not supported by TensorFlow.")
diff --git a/api/tests/run.sh b/api/tests/run.sh
index 291ebe875e..33736d167d 100755
--- a/api/tests/run.sh
+++ b/api/tests/run.sh
@@ -4,6 +4,12 @@ export CUDA_VISIBLE_DEVICES="1"
 #export GLOG_v=4
 #export LD_LIBRARY_PATH=/work/cudnn/cudnn-7.6.5/lib64:${LD_LIBRARY_PATH}
 
+NVCC=`which nvcc`
+if [ ${NVCC} != "" ]; then
+    NVCC_VERSION=`nvcc --version | tail -n 1 | grep "[0-9][0-9]*\.[0-9]" -o | uniq`
+    export LD_LIBRARY_PATH=/usr/local/cuda-${NVCC_VERSION}/extras/CUPTI/lib64:${LD_LIBRARY_PATH}
+fi
+
 name=${1:-"abs"}
 config_id=${2:-"0"}
 filename="examples/${name}.json"
@@ -13,7 +19,6 @@ python ${name}.py \
       --framework "paddle" \
       --json_file ${filename} \
      --config_id ${config_id} \
-      --run_with_executor True \
      --check_output False \
      --profiler "none" \
      --backward False \
diff --git a/scripts/run_test.sh b/scripts/run_test.sh
index cb72ea524f..91b40b6549 100644
--- a/scripts/run_test.sh
+++ b/scripts/run_test.sh
@@ -28,7 +28,8 @@ BENCHMARK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )"
 echo ${BENCHMARK_ROOT}
 
 function prepare_tf_env(){
-    pip install tensorflow-gpu==1.15 pre-commit==1.21 pylint==1.9.5 pytest==4.6.9
+    pip install tensorflow-gpu==2.0 pre-commit==1.21 pylint==1.9.5 pytest==4.6.9
+    python -c "import tensorflow as tf; print(tf.__version__)"
     apt-get update
     apt-get install -y git
 }
@@ -52,7 +53,7 @@ function fetch_upstream_master_if_not_exist() {
 function run_api(){
     fetch_upstream_master_if_not_exist
     cd ${BENCHMARK_ROOT}/api/tests
-    HAS_MODIFIED_API_TEST=`git diff --name-only upstream/$BRANCH | grep "api/tests" || true`
+    HAS_MODIFIED_API_TEST=`git diff --name-only upstream/$BRANCH | grep "api/tests.*.py$" || true`
     API_NAMES=(abs fc)
     if [ "${HAS_MODIFIED_API_TEST}" != "" ] ; then
        for api in ${HAS_MODIFIED_API_TEST[@]}; do