
Commit

Added the latency test (command taking longer than 1 second) to CI
filipecosta90 committed Nov 7, 2024
1 parent 21ede2f commit 5ce4f41
Showing 2 changed files with 55 additions and 1 deletion.
tests/run_tests.sh (2 changes: 1 addition & 1 deletion)
@@ -70,7 +70,7 @@ TLS_CACERT=$ROOT/tests/tls/ca.crt
REDIS_SERVER=${REDIS_SERVER:-redis-server}
MEMTIER_BINARY=$ROOT/memtier_benchmark

RLTEST_ARGS=" --oss-redis-path $REDIS_SERVER"
RLTEST_ARGS=" --oss-redis-path $REDIS_SERVER --enable-debug-command"
[[ "$TEST" != "" ]] && RLTEST_ARGS+=" --test $TEST"
[[ $VERBOSE == 1 ]] && RLTEST_ARGS+=" -v"
[[ $TLS == 1 ]] && RLTEST_ARGS+=" --tls-cert-file $TLS_CERT --tls-key-file $TLS_KEY --tls-ca-cert-file $TLS_CACERT --tls"
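
The new --enable-debug-command flag matters because recent Redis releases (7.0 and later) disable the DEBUG command by default, and the test added below drives memtier_benchmark with DEBUG SLEEP 2 to simulate a command that takes longer than one second. A minimal sketch of what DEBUG SLEEP does, assuming the redis-py client and a local server started with the debug command enabled (the host and port here are illustrative):

import time
import redis

r = redis.Redis(host="localhost", port=6379)
start = time.time()
# DEBUG SLEEP blocks the (single-threaded) server for the requested number of
# seconds, so every request issued during that window observes a latency of
# roughly 2 seconds or more.
r.execute_command("DEBUG", "SLEEP", 2)
assert time.time() - start >= 2.0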
tests/tests_oss_simple_flow.py (54 changes: 54 additions & 0 deletions)
@@ -670,3 +670,57 @@ def test_data_import_setex(env):
    merged_command_stats = {'cmdstat_setex': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
    overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
    assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)


def test_valid_json_using_debug_command(env):
    benchmark_specs = {"name": env.testName, "args": []}
    addTLSArgs(benchmark_specs, env)
    # when running an arbitrary command, --command should be the last argument appended
    benchmark_specs["args"].append('--command=DEBUG SLEEP 2')
    total_requests = 3
    config = get_default_memtier_config(1, 1, total_requests)
    master_nodes_list = env.getMasterNodesList()

    add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

    # Create a temporary directory
    test_dir = tempfile.mkdtemp()

    config = RunConfig(test_dir, env.testName, config, {})
    ensure_clean_benchmark_folder(config.results_dir)

    benchmark = Benchmark.from_json(config, benchmark_specs)

    if not benchmark.run():
        debugPrintMemtierOnError(config, env)

    ## Assert that all JSON latency metrics are properly stored and calculated
    json_filename = '{0}/mb.json'.format(config.results_dir)
    with open(json_filename) as results_json:
        # ensure it's valid JSON
        results_dict = json.load(results_json)
        debug_metrics = results_dict['ALL STATS']['Debugs']
        debug_count = debug_metrics["Count"]
        total_metrics = results_dict['ALL STATS']['Totals']
        total_count = total_metrics["Count"]
        env.assertEqual(debug_count, total_count)
        env.assertEqual(debug_count, total_requests)
        debug_metrics_ts = debug_metrics["Time-Serie"]

        for second_data in debug_metrics_ts.values():
            count = second_data["Count"]
            # if we had commands on that second, the latency metrics need to be > 0
            if count > 0:
                for latency_metric_name in ["Accumulated Latency", "Min Latency", "Max Latency", "p50.00", "p99.00", "p99.90"]:
                    metric_value = second_data[latency_metric_name]
                    env.assertTrue(metric_value > 0.0)

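For reference, a trimmed sketch of the JSON layout that the assertions above traverse; only the key names come from the test itself, and the numeric values (and the per-second key "0") are invented:

# Illustrative structure only; the real mb.json contains many more sections
# and one "Time-Serie" entry per second of the run.
example_results = {
    "ALL STATS": {
        "Totals": {"Count": 3},
        "Debugs": {
            "Count": 3,
            "Time-Serie": {
                "0": {
                    "Count": 1,
                    "Accumulated Latency": 2001.0,
                    "Min Latency": 2000.0,
                    "Max Latency": 2002.0,
                    "p50.00": 2001.0,
                    "p99.00": 2002.0,
                    "p99.90": 2002.0,
                },
            },
        },
    },
}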
