From 5ce4f41e85cea9ecce249d09f2fc1306e0de7ac2 Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Thu, 7 Nov 2024 16:01:36 +0000
Subject: [PATCH] Added the latency test (command taking longer than 1 second)
 to CI

---
 tests/run_tests.sh             |  2 +-
 tests/tests_oss_simple_flow.py | 45 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 3e55c10d..7cf7577f 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -70,7 +70,7 @@ TLS_CACERT=$ROOT/tests/tls/ca.crt
 REDIS_SERVER=${REDIS_SERVER:-redis-server}
 MEMTIER_BINARY=$ROOT/memtier_benchmark
 
-RLTEST_ARGS=" --oss-redis-path $REDIS_SERVER"
+RLTEST_ARGS=" --oss-redis-path $REDIS_SERVER --enable-debug-command"
 [[ "$TEST" != "" ]] && RLTEST_ARGS+=" --test $TEST"
 [[ $VERBOSE == 1 ]] && RLTEST_ARGS+=" -v"
 [[ $TLS == 1 ]] && RLTEST_ARGS+=" --tls-cert-file $TLS_CERT --tls-key-file $TLS_KEY --tls-ca-cert-file $TLS_CACERT --tls"
diff --git a/tests/tests_oss_simple_flow.py b/tests/tests_oss_simple_flow.py
index bacc8da4..cefc8dd2 100644
--- a/tests/tests_oss_simple_flow.py
+++ b/tests/tests_oss_simple_flow.py
@@ -670,3 +670,48 @@ def test_data_import_setex(env):
     merged_command_stats = {'cmdstat_setex': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
     overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
     assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
+
+
+def test_valid_json_using_debug_command(env):
+    benchmark_specs = {"name": env.testName, "args": []}
+    addTLSArgs(benchmark_specs, env)
+    # for arbitrary commands, the --command argument must come last
+    # DEBUG SLEEP 2 guarantees a per-command latency well above 1 second
+    benchmark_specs["args"].append('--command=DEBUG SLEEP 2')
+    total_requests = 3
+    config = get_default_memtier_config(1, 1, total_requests)
+    master_nodes_list = env.getMasterNodesList()
+
+    add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)
+
+    # Create a temporary directory for the benchmark results
+    test_dir = tempfile.mkdtemp()
+
+    config = RunConfig(test_dir, env.testName, config, {})
+    ensure_clean_benchmark_folder(config.results_dir)
+
+    benchmark = Benchmark.from_json(config, benchmark_specs)
+
+    if not benchmark.run():
+        debugPrintMemtierOnError(config, env)
+
+    ## Assert that all JSON latency metrics are properly stored and calculated
+    json_filename = '{0}/mb.json'.format(config.results_dir)
+    with open(json_filename) as results_json:
+        # ensure the output is valid JSON
+        results_dict = json.load(results_json)
+        debug_metrics = results_dict['ALL STATS']['Debugs']
+        debug_count = debug_metrics["Count"]
+        total_metrics = results_dict['ALL STATS']['Totals']
+        total_count = total_metrics["Count"]
+        env.assertEqual(debug_count, total_count)
+        env.assertEqual(debug_count, total_requests)
+        debug_metrics_ts = debug_metrics["Time-Serie"]
+
+        for second_data in debug_metrics_ts.values():
+            count = second_data["Count"]
+            # if commands ran during that second, the latency metrics must be > 0
+            if count > 0:
+                for latency_metric_name in ["Accumulated Latency", "Min Latency", "Max Latency", "p50.00", "p99.00", "p99.90"]:
+                    metric_value = second_data[latency_metric_name]
+                    env.assertTrue(metric_value > 0.0)
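
Note for reviewers: below is a minimal sketch of the mb.json shape the new
assertions walk. The key names ("ALL STATS", "Totals", "Debugs", "Count",
"Time-Serie", and the per-second latency fields) are taken from the test
above; the timestamp key and the numeric values are illustrative placeholders,
not actual memtier_benchmark output.

    # Illustrative sketch only -- all values are placeholders. With
    # DEBUG SLEEP 2 each command blocks for ~2 seconds, so every
    # per-second latency metric the test checks should be strictly > 0.
    expected_shape = {
        "ALL STATS": {
            "Totals": {"Count": 3},
            "Debugs": {
                "Count": 3,
                "Time-Serie": {
                    "0": {
                        "Count": 1,
                        "Accumulated Latency": 2000.0,
                        "Min Latency": 2000.0,
                        "Max Latency": 2000.0,
                        "p50.00": 2000.0,
                        "p99.00": 2000.0,
                        "p99.90": 2000.0,
                    },
                },
            },
        },
    }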
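
Given the existing TEST handling already visible in the run_tests.sh hunk
([[ "$TEST" != "" ]] && RLTEST_ARGS+=" --test $TEST"), the new test should be
runnable in isolation with something like
TEST=test_valid_json_using_debug_command ./tests/run_tests.sh -- the exact
invocation is an assumption based on that snippet, not verified here.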