#!/usr/bin/env bash
## solana-bench-tps DoS report config
set -ex
# read env
source "env-artifact.sh"
source "utils.sh"
# check required env vars; exit if any is missing
[[ -z $START_TIME ]] && echo "START_TIME env not found" && exit 1
[[ -z $START_TIME2 ]] && echo "START_TIME2 env not found" && exit 1
[[ -z $STOP_TIME ]] && echo "STOP_TIME env not found" && exit 1
[[ -z $STOP_TIME2 ]] && echo "STOP_TIME2 env not found" && exit 1
[[ -z $INFLUX_TOKEN ]] && echo "INFLUX_TOKEN env not found" && exit 1
[[ -z $INFLUX_HOST ]] && echo "INFLUX_HOST env not found" && exit 1
[[ -z $REPORT_BUCKET ]] && echo "REPORT_BUCKET env not found" && exit 1
# default LARGE_DATA_SET to false when unset
[[ -z $LARGE_DATA_SET ]] && LARGE_DATA_SET="false"
if [[ -n $BUILDKITE_BUILD_URL ]]; then
  BUILD_BUTTON_TEXT="Buildkite Job"
else
  BUILD_BUTTON_TEXT="Build URL not defined"
  BUILDKITE_BUILD_URL="https://buildkite.com/anza/"
fi
## set up window intervals for the Flux queries
window_interval="10s"
window_interval_long="10s"
# pad the query range by 5 minutes beyond the test duration
oversize_window=$(echo "${DURATION}+300" | bc)
printf -v oversize_window "%ss" "$oversize_window"
if [[ "$LARGE_DATA_SET" == "true" ]];then
[[ ! "$INFLUX_WINDOW_INTERVAL" ]] && INFLUX_WINDOW_INTERVAL="10m"
[[ ! "$INFLUX_WINDOW_INTERVAL_LONG" ]] && INFLUX_WINDOW_INTERVAL_LONG="30m"
window_interval=$INFLUX_WINDOW_INTERVAL
window_interval_long=$INFLUX_WINDOW_INTERVAL_LONG
oversize_window="12h"
fi
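# Example (assumed values): with DURATION=1800 and LARGE_DATA_SET=false the
# queries aggregate in 10s windows and oversize_window becomes "2100s"
# (1800s test + 300s padding); with LARGE_DATA_SET=true the windows widen to
# 10m/30m and the padded range is pinned at 12h.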
## Configuration
test_type=$TEST_TYPE
client="tpu"
[[ "$USE_TPU_CLIENT" == "false" ]] && client="rpc"
git_commit=$SOLANA_GIT_COMMIT
cluster_version=$CLUSTER_VERSION
num_clients=$NUM_CLIENT
client_keypair_path="keypair-configs/$KEYPAIR_FILE"
duration=$DURATION
tx_count=$TX_COUNT
thread_batch_sleep_ms=$THREAD_BATCH_SLEEP_MS
API_V2_HOST="${INFLUX_HOST}/api/v2/query"
HEADER_AUTH="Authorization: Token ${INFLUX_TOKEN}"
CURL_TIMEOUT=12
start_time=$START_TIME
start_time2=$START_TIME2
stop_time=$STOP_TIME
stop_time2=$STOP_TIME2
## load the Flux query definitions (utils.sh is already sourced above)
source influx_data.sh
# POST a Flux query ($1) to the InfluxDB v2 API; retry up to 3 times and
# leave the annotated-CSV response in ./query.result
query(){
  local retry=0
  for retry in 0 1 2
  do
    if [[ $retry -gt 0 ]]; then
      printf "start retry:%s\n" "$retry"
      sleep 2
    fi
    if [[ -z "$1" ]]; then
      echo "query command is empty!"
      return 0 # nothing to post
    fi
    curl --connect-timeout "${CURL_TIMEOUT}" --request POST \
      "${API_V2_HOST}" \
      --header "${HEADER_AUTH}" \
      --header 'Accept: application/csv' \
      --header 'Content-type: application/vnd.flux' \
      --data "$1" > query.result
    local n=0
    local arr=()
    local line
    while IFS= read -r line
    do
      if [[ ${#line} -gt 1 ]]; then # the trailing "empty" line still carries a CR, so its length is 1
        arr+=("$line")
        n=$((n+1))
      fi
    done < query.result
    if [[ $n -gt 1 ]]; then
      printf "%s\n" "valid return"
      break
    else # empty or error response
      printf "*retry:%s\nquery error:%s\n" "$retry" "${arr[0]}"
    fi
  done
}
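# Usage sketch (hypothetical query; the real ones come from influx_data.sh):
#   query 'from(bucket:"testnet") |> range(start: -5m) |> count()'
# on success, query.result holds annotated CSV such as:
#   ,result,table,_value
#   ,_result,0,137371131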
# print every Flux query for the log
for f in "${!FLUX[@]}"
do
  echo "---- FLUX $f ----"
  echo "${FLUX[$f]}"
done
declare -A FLUX_RESULT # collect query results, keyed by query name
for f in "${!FLUX[@]}"
do
  if [[ -z "${FLUX[${f}]}" ]]; then
    printf "***%s returned a zero-length query\n" "$f"
  fi
  query "${FLUX[${f}]}"
  if [[ -f 'query.result' ]]; then
    FLUX_RESULT[${f}]="$(cat query.result)"
    printf "%s %s\n" "$f" "${FLUX_RESULT[${f}]}"
  else
    printf "%s: %s\n" "$f" "no query.result"
  fi
  sleep 1
done
## For debugging, print out each query result
# for r in "${!FLUX_RESULT[@]}"
# do
#   result=${FLUX_RESULT[${r}]}
#   echo "---- $r result ----"
#   echo "$result"
#   echo "---- $r end ----"
# done
## a valid result looks like this:
## ,result,table,_value
## ,_result,0,137371131
# parse the annotated CSV in $result_input and set $_value to the value
# column of the first data row; set _value="na" when the result is empty
get_value() {
  local arr=()
  local n=0
  local line
  while IFS= read -r line
  do
    if [[ ${#line} -gt 1 ]]; then # the trailing "empty" line still carries a CR, so its length is 1
      arr+=("$line")
      n=$((n+1))
    fi
  done <<< "$result_input"
  if [[ $n -gt 1 ]]; then
    while IFS=, read -r empty result table val host_id
    do
      _value="$(echo "$val" | tr -d '\r\n')" # the raw value carries a trailing CR/newline
    done <<< "${arr[1]}"
  else
    _value="na"
  fi
}
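# Example: with the two-line CSV shown above,
#   result_input=$',result,table,_value\n,_result,0,137371131'
# get_value sets _value=137371131; an empty or error result yields _value="na".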
declare -A DATAPOINT # collect report fields to write back to InfluxDB
# write one line-protocol point to the report bucket
# $1: line-protocol data to write
write_datapoint_v2() {
  curl -i --connect-timeout "${CURL_TIMEOUT}" -XPOST "${INFLUX_HOST}/api/v2/write?bucket=${REPORT_BUCKET}/autogen&precision=ns" \
    --header "${HEADER_AUTH}" \
    --data-raw "$1"
}
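# Usage sketch (hypothetical tag and field values):
#   write_datapoint_v2 'tps,build=123,client=tpu mean_tps=4200 1700000000000000000'
# i.e. measurement,tag=...[,tag=...] field=value timestamp-in-ns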
result_detail=""
# raw epoch times, recorded for the Influx datapoints only
DATAPOINT[start_time]="$start_time"
DATAPOINT[stop_time]="$stop_time"
printf -v time_range_str "\"time range: %s ~ %s\"" \
  "$(date --rfc-3339=seconds -u -d "@$start_time")" "$(date --rfc-3339=seconds -u -d "@$stop_time")"
DATAPOINT[time_range]="$time_range_str"
# slot
result_input=${FLUX_RESULT['start_slot']}
get_value
start_slot_txt="start_slot: $_value"
DATAPOINT[start_slot]="$_value"
result_input=${FLUX_RESULT['end_slot']}
get_value
end_slot_txt="end_slot: $_value"
DATAPOINT[end_slot]="$_value"
# TPS: each query returns a transaction count summed over one $window_interval
# window, so divide by the window length in seconds to get the real TPS
result_input=${FLUX_RESULT['mean_tx_count']}
get_value
extract_time_in_sec "${window_interval}" # sets $duration_in_seconds
[[ ${duration_in_seconds} -eq "0" ]] && tps="0" || tps=$(echo "scale=0;$_value/${duration_in_seconds}" | bc)
mean_tx_count_txt="mean_tps: $tps"
DATAPOINT[mean_tps]="$tps"
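# Worked example (assumed numbers): with window_interval="10s" a window count
# of _value=42000 gives tps = 42000/10 = 4200.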
result_input=${FLUX_RESULT['max_tx_count']}
get_value
extract_time_in_sec "${window_interval}"
[[ ${duration_in_seconds} -eq "0" ]] && tps="0" || tps=$(echo "scale=0;$_value/${duration_in_seconds}" | bc)
max_tx_count_txt="max_tps: $tps"
DATAPOINT[max_tps]="$tps"
result_input=${FLUX_RESULT['p90_tx_count']}
get_value
extract_time_in_sec "${window_interval}"
[[ ${duration_in_seconds} -eq "0" ]] && tps="0" || tps=$(echo "scale=0;$_value/${duration_in_seconds}" | bc)
p90_tx_count_txt="90th_tx_count: $tps"
DATAPOINT[90th_tx_count]="$tps"
result_input="${FLUX_RESULT['p99_tx_count']}"
get_value
extract_time_in_sec "${window_interval}"
[[ ${duration_in_seconds} -eq "0" ]] && tps="0" || tps=$(echo "scale=0;$_value/${duration_in_seconds}" | bc)
p99_tx_count_txt="99th_tx_count: $tps"
DATAPOINT[99th_tx_count]="$tps"
# tower distance
result_input="${FLUX_RESULT['mean_tower_vote_distance']}"
get_value
mean_tower_vote_distance_txt="mean_tower_vote_distance: $_value"
DATAPOINT[mean_tower_vote_distance]="$_value"
result_input="${FLUX_RESULT['max_tower_vote_distance']}"
get_value
max_tower_vote_distance_txt="max_tower_vote_distance: $_value"
DATAPOINT[max_tower_vote_distance]="$_value"
result_input="${FLUX_RESULT['min_tower_vote_distance']}"
get_value
min_tower_vote_distance_txt="min_tower_vote_distance: $_value"
DATAPOINT[min_tower_vote_distance]="$_value"
result_input="${FLUX_RESULT['p90_tower_vote_distance']}"
get_value
p90_tower_vote_distance_txt="90th_tower_vote_distance: $_value"
DATAPOINT[90th_tower_vote_distance]="$_value"
result_input="${FLUX_RESULT['p99_tower_vote_distance']}"
get_value
p99_tower_vote_distance_txt="99th_tower_vote_distance: $_value"
DATAPOINT[99th_tower_vote_distance]="$_value"
# optimistic_slot_elapsed
result_input="${FLUX_RESULT['mean_optimistic_slot_elapsed']}"
get_value
mean_optimistic_slot_elapsed_txt="mean_optimistic_slot_elapsed: $_value"
DATAPOINT[mean_optimistic_slot_elapsed]="$_value"
result_input="${FLUX_RESULT['max_optimistic_slot_elapsed']}"
get_value
max_optimistic_slot_elapsed_txt="max_optimistic_slot_elapsed: $_value"
DATAPOINT[max_optimistic_slot_elapsed]="$_value"
result_input="${FLUX_RESULT['p90_optimistic_slot_elapsed']}"
get_value
p90_optimistic_slot_elapsed_txt="90th_optimistic_slot_elapsed: $_value"
DATAPOINT[90th_optimistic_slot_elapsed]="$_value"
result_input="${FLUX_RESULT['p99_optimistic_slot_elapsed']}"
get_value
p99_optimistic_slot_elapsed_txt="99th_optimistic_slot_elapsed: $_value"
DATAPOINT[99th_optimistic_slot_elapsed]="$_value"
# ct_stats_block_cost
result_input="${FLUX_RESULT['mean_ct_stats_block_cost']}"
get_value
mean_ct_stats_block_cost_txt="mean_cost_tracker_stats_block_cost: $_value"
DATAPOINT[mean_cost_tracker_stats_block_cost]="$_value"
result_input="${FLUX_RESULT['max_ct_stats_block_cost']}"
get_value
max_ct_stats_block_cost_txt="max_cost_tracker_stats_block_cost: $_value"
DATAPOINT[max_cost_tracker_stats_block_cost]="$_value"
result_input="${FLUX_RESULT['p90_ct_stats_block_cost']}"
get_value
p90_ct_stats_block_cost_txt="90th_cost_tracker_stats_block_cost: $_value"
DATAPOINT[90th_cost_tracker_stats_block_cost]="$_value"
result_input="${FLUX_RESULT['p99_ct_stats_block_cost']}"
get_value
p99_ct_stats_block_cost_txt="99th_cost_tracker_stats_block_cost: $_value"
DATAPOINT[99th_cost_tracker_stats_block_cost]="$_value"
# ct_stats_transaction_count
result_input="${FLUX_RESULT['mean_ct_stats_transaction_count']}"
get_value
mean_mean_ct_stats_tx_count_txt="mean_cost_tracker_stats_transaction_count: $_value"
DATAPOINT[mean_cost_tracker_stats_transaction_count]="$_value"
result_input="${FLUX_RESULT['max_ct_stats_transaction_count']}"
get_value
max_mean_ct_stats_tx_count_txt="max_cost_tracker_stats_transaction_count: $_value"
DATAPOINT[max_cost_tracker_stats_transaction_count]="$_value"
result_input="${FLUX_RESULT['p90_ct_stats_transaction_count']}"
get_value
p90_mean_ct_stats_tx_count_txt="90th_cost_tracker_stats_transaction_count: $_value"
DATAPOINT[90th_cost_tracker_stats_transaction_count]="$_value"
result_input="${FLUX_RESULT['p99_ct_stats_transaction_count']}"
get_value
p99_mean_ct_stats_tx_count_txt="99th_cost_tracker_stats_transaction_count: $_value"
DATAPOINT[99th_cost_tracker_stats_transaction_count]="$_value"
# ct_stats_number_of_accounts
result_input="${FLUX_RESULT['mean_ct_stats_number_of_accounts']}"
get_value
mean_ct_stats_num_of_accts_txt="mean_cost_tracker_stats_number_of_accounts: $_value"
DATAPOINT[mean_cost_tracker_stats_number_of_accounts]="$_value"
result_input="${FLUX_RESULT['max_ct_stats_number_of_accounts']}"
get_value
max_ct_stats_num_of_accts_txt="max_cost_tracker_stats_number_of_accounts: $_value"
DATAPOINT[max_cost_tracker_stats_number_of_accounts]="$_value"
result_input="${FLUX_RESULT['p90_ct_stats_number_of_accounts']}"
get_value
p90_ct_stats_num_of_accts_txt="90th_cost_tracker_stats_number_of_accounts: $_value"
DATAPOINT[90th_cost_tracker_stats_number_of_accounts]="$_value"
result_input="${FLUX_RESULT['p99_ct_stats_number_of_accounts']}"
get_value
p99_ct_stats_num_of_accts_txt="99th_cost_tracker_stats_number_of_accounts: $_value"
DATAPOINT[99th_cost_tracker_stats_number_of_accounts]="$_value"
# blocks fill
result_input="${FLUX_RESULT['total_blocks']}"
get_value
if [[ "$_value" == "na" ]];then
_value=0
fi
total_blocks_tmp=$_value
total_blocks_txt="numb_total_blocks: $_value"
DATAPOINT[numb_total_blocks]="$_value"
result_input="${FLUX_RESULT['blocks_fill_50']}"
get_value
blocks_fill_50_txt="numb_blocks_50_full: $_value"
DATAPOINT[numb_blocks_50_full]="$_value"
if [[ "$_value" == "na" || $total_blocks_tmp -eq 0 ]];then
percent_value="0%"
percent_raw_value=0
else
percent_raw_value=$(echo "scale=2;($_value/$total_blocks_tmp)*100" | bc)
printf -v percent_value "%.0f%s" $percent_raw_value "%"
fi
blocks_fill_50_percent_txt="blocks_50_full: $percent_value"
DATAPOINT[blocks_50_full]="$percent_raw_value"
result_input="${FLUX_RESULT['blocks_fill_90']}"
get_value
blocks_fill_90_txt="numb_blocks_90_full: $_value"
DATAPOINT[numb_blocks_90_full]="$_value"
if [[ "$_value" == "na" || $total_blocks_tmp -eq 0 ]];then
percent_value="0%"
percent_raw_value=0
else
percent_raw_value=$(echo "scale=2;($_value/$total_blocks_tmp)*100" | bc)
printf -v percent_value "%.0f%s" $percent_raw_value "%"
fi
blocks_fill_90_percent_txt="blocks_90_full: $percent_value"
DATAPOINT[blocks_90_full]="$percent_raw_value"
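# Worked example (assumed counts): 45 of 180 blocks at >=50% fill gives
# scale=2 -> (45/180)*100 = 25.00, reported as "blocks_50_full: 25%".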
# skip_rate
result_input="${FLUX_RESULT['mean_skip_rate']}"
get_value
[[ $_value != "na" ]] && printf -v precision "%.2f" "$_value" || precision="na"
mean_skip_rate_txt="mean_skip_rate: $precision%"
DATAPOINT[mean_skip_rate]="$_value"
result_input="${FLUX_RESULT['max_skip_rate']}"
get_value
[[ $_value != "na" ]] && printf -v precision "%.2f" "$_value" || precision="na"
max_skip_rate_txt="max_skip_rate: $precision%"
DATAPOINT[max_skip_rate]="$_value"
result_input="${FLUX_RESULT['skip_rate_90']}"
get_value
[[ $_value != "na" ]] && printf -v precision "%.2f" "$_value" || precision="na"
skip_rate_90_txt="skip_rate_90: $precision%"
DATAPOINT[skip_rate_90]="$_value"
result_input="${FLUX_RESULT['mean_skip_rate_b4_test']}"
get_value
[[ $_value != "na" ]] && printf -v precision "%.2f" "$_value" || precision="na"
mean_skip_rate_b4_test_txt="mean_skip_rate_b4_test: $precision%"
DATAPOINT[mean_skip_rate_b4_test]="$_value"
# write the report datapoints to InfluxDB
build="$BUILDKITE_BUILD_NUMBER"
[[ -z "$BUILDKITE_BUILD_NUMBER" ]] && build="0"
utc_sec=$(date +%s)
# convert epoch seconds to the nanosecond precision the write API expects
write_ts=$(echo "scale=2;${utc_sec}*1000000000" | bc)
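# e.g. utc_sec=1700000000 -> write_ts=1700000000000000000 (ns since the epoch)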
declare -A FIELD_MEASUREMENT # map report fields to their InfluxDB measurement names
FIELD_MEASUREMENT[mean_tps]=tps
for r in "${!DATAPOINT[@]}"
do
  # fields without an explicit mapping fall back to the field name itself,
  # so the line protocol never starts with a bare comma
  measurement=${FIELD_MEASUREMENT[$r]:-$r}
  write_data="$measurement,build=$build,client=$client,branch=$SOLANA_BUILD_BRANCH,git_commit=$git_commit,cluster_version=$cluster_version,\
clients_num=$num_clients,duration=$duration,tx_count=$tx_count,thread_batch_sleep_ms=$thread_batch_sleep_ms,durable_nonce=$USE_DURABLE_NONCE $r=${DATAPOINT[$r]} $write_ts"
  write_datapoint_v2 "$write_data"
done
## create a Grafana link for the test window (Grafana expects epoch milliseconds)
gf_from=$(echo "scale=2;${start_time}*1000" | bc)
gf_to=$(echo "scale=2;${stop_time}*1000" | bc)
gf_prefix="https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry?orgId=1&from="
gf_postfix="&var-datasource=Influx-Enterprise&var-testnet=tds&var-hostid=All"
printf -v gf_url "%s%s%s%s%s" "$gf_prefix" "$gf_from" "&to=" "$gf_to" "$gf_postfix"
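# e.g. start_time=1700000000 and stop_time=1700001800 yield
# ...?orgId=1&from=1700000000000&to=1700001800000&var-datasource=Influx-Enterprise...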
# send notifications when webhooks are configured
if [[ $SLACK_WEBHOOK ]]; then
  source slack.sh
  slack_send
fi
if [[ $DISCORD_WEBHOOK ]]; then
  source discord.sh
  discord_send
fi