Skip to content

Commit

Permalink
Ensuring that the per second metrics are numeric non negative
Browse files Browse the repository at this point in the history
  • Loading branch information
filipecosta90 committed Nov 7, 2024
1 parent de5019a commit bfe511c
Showing 1 changed file with 22 additions and 22 deletions.
44 changes: 22 additions & 22 deletions tests/tests_oss_simple_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,9 +142,9 @@ def test_default_set(env):
set_tx = int(set_tx_column_data[col_pos])
set_rx = int(set_rx_column_data[col_pos])
set_tx_rx = int(set_tx_rx_column_data[col_pos])
env.assertTrue(set_tx > 0)
env.assertTrue(set_rx > 0)
env.assertTrue(set_tx_rx > 0)
env.assertTrue(set_tx >= 0)
env.assertTrue(set_rx >= 0)
env.assertTrue(set_tx_rx >= 0)
env.assertAlmostEqual(set_tx_rx,set_tx+set_rx,1)

# the GET bw should be 0
Expand Down Expand Up @@ -179,7 +179,7 @@ def test_default_set(env):
# assert the metric value is non zero on writes and zero on reads
set_metric_value_kbs = set_metrics[metric_name]
get_metric_value_kbs = get_metrics[metric_name]
env.assertTrue(set_metric_value_kbs > 0)
env.assertTrue(set_metric_value_kbs >= 0)
env.assertTrue(get_metric_value_kbs == 0)

for second_data in set_metrics_ts.values():
Expand All @@ -188,8 +188,8 @@ def test_default_set(env):
count = second_data["Count"]
# if we had commands on that second the BW needs to be > 0
if count > 0:
env.assertTrue(bytes_rx > 0)
env.assertTrue(bytes_tx > 0)
env.assertTrue(bytes_rx >= 0)
env.assertTrue(bytes_tx >= 0)

for second_data in get_metrics_ts.values():
bytes_rx = second_data["Bytes RX"]
Expand Down Expand Up @@ -240,8 +240,8 @@ def test_default_set_get(env):
# assert the metric value is non zero given we've had write and read
set_metric_value_kbs = set_metrics[metric_name]
get_metric_value_kbs = get_metrics[metric_name]
env.assertTrue(set_metric_value_kbs > 0)
env.assertTrue(get_metric_value_kbs > 0)
env.assertTrue(set_metric_value_kbs >= 0)
env.assertTrue(get_metric_value_kbs >= 0)

for second_data in set_metrics_ts.values():
bytes_rx = second_data["Bytes RX"]
Expand All @@ -252,11 +252,11 @@ def test_default_set_get(env):
p50 = second_data["p50.00"]
p99 = second_data["p99.00"]
p999 = second_data["p99.90"]
env.assertTrue(bytes_rx > 0)
env.assertTrue(bytes_tx > 0)
env.assertTrue(p50 > 0.0)
env.assertTrue(p99 > 0.0)
env.assertTrue(p999 > 0.0)
env.assertTrue(bytes_rx >= 0)
env.assertTrue(bytes_tx >= 0)
env.assertTrue(p50 >= 0.0)
env.assertTrue(p99 >= 0.0)
env.assertTrue(p999 >= 0.0)

for second_data in get_metrics_ts.values():
bytes_rx = second_data["Bytes RX"]
Expand All @@ -267,11 +267,11 @@ def test_default_set_get(env):
p50 = second_data["p50.00"]
p99 = second_data["p99.00"]
p999 = second_data["p99.90"]
env.assertTrue(bytes_rx > 0)
env.assertTrue(bytes_tx > 0)
env.assertTrue(p50 > 0.0)
env.assertTrue(p99 > 0.0)
env.assertTrue(p999 > 0.0)
env.assertTrue(bytes_rx >= 0)
env.assertTrue(bytes_tx >= 0)
env.assertTrue(p50 >= 0.0)
env.assertTrue(p99 >= 0.0)
env.assertTrue(p999 >= 0.0)

def test_default_set_get_with_print_percentiles(env):
p_str = '0,10,20,30,40,50,60,70,80,90,95,100'
Expand Down Expand Up @@ -434,7 +434,7 @@ def test_default_arbitrary_command_keyless(env):
addTLSArgs(benchmark_specs, env)
# on arbitrary command args should be the last one
benchmark_specs["args"].append('--command=PING')
config = get_default_memtier_config()
config = get_default_memtier_config(10,5,10000)
master_nodes_list = env.getMasterNodesList()

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)
Expand Down Expand Up @@ -478,8 +478,8 @@ def test_default_arbitrary_command_keyless(env):
env.assertTrue(metric_value_second_data > 0.0)
# if we had commands on that second the BW needs to be > 0
if count > 0:
env.assertTrue(bytes_rx > 0)
env.assertTrue(bytes_tx > 0)
env.assertTrue(bytes_rx >= 0)
env.assertTrue(bytes_tx >= 0)


def test_default_arbitrary_command_set(env):
Expand Down Expand Up @@ -714,4 +714,4 @@ def test_valid_json_using_debug_command(env):
if count > 0:
for latency_metric_name in ["Accumulated Latency","Min Latency","Max Latency","p50.00","p99.00","p99.90"]:
metric_value = second_data[latency_metric_name]
env.assertTrue(metric_value > 0.0)
env.assertTrue(metric_value >= 0.0)

0 comments on commit bfe511c

Please sign in to comment.