diff --git a/.gitignore b/.gitignore index c5dee72f0..3307395cf 100644 --- a/.gitignore +++ b/.gitignore @@ -82,4 +82,7 @@ lcov.info **/build/** -Move.lock \ No newline at end of file +Move.lock + +# local configs +*.local.yaml \ No newline at end of file diff --git a/crates/key-server/src/aggregator/server.rs b/crates/key-server/src/aggregator/server.rs index 6856b0fbd..2a96cf449 100644 --- a/crates/key-server/src/aggregator/server.rs +++ b/crates/key-server/src/aggregator/server.rs @@ -119,6 +119,7 @@ impl NetworkConfig for AggregatorOptions { struct AppState { aggregator_metrics: Arc, grpc_client: SuiGrpcClient, + http_client: reqwest::Client, threshold: Arc>, committee_members: Arc>>, options: AggregatorOptions, @@ -318,6 +319,7 @@ async fn handle_fetch_key( let ks_version_req = &state.options.key_server_version_requirement; let api_credentials = &state.options.api_credentials; let metrics = state.aggregator_metrics.clone(); + let http_client = state.http_client.clone(); let mut fetch_tasks: FuturesUnordered<_> = state .committee_members .read() @@ -331,6 +333,7 @@ async fn handle_fetch_key( let ks_version_req = ks_version_req.clone(); let api_creds = api_credentials.get(&partial_key_server.name).cloned(); let metrics = metrics.clone(); + let http_client = http_client.clone(); async move { // Check if API credentials exist for this server. 
let creds = match api_creds { @@ -351,6 +354,7 @@ async fn handle_fetch_key( req_id, &ks_version_req, creds, + &http_client, ) .await { @@ -446,13 +450,12 @@ async fn fetch_from_member( req_id: &str, ks_version_req: &VersionReq, api_credentials: ApiCredentials, + client: &reqwest::Client, ) -> Result { info!( "Fetching from party {} at {} (req_id: {})", member.party_id, member.url, req_id ); - - let client = reqwest::Client::new(); let request_builder = client .post(format!("{}/v1/fetch_key", member.url)) .header(HEADER_CLIENT_SDK_TYPE, SDK_TYPE_AGGREGATOR) @@ -664,9 +667,19 @@ async fn load_committee_state( // Check and warn about missing API credentials for current committee. check_missing_api_credentials(&members, &options.api_credentials); + // Create HTTP client with increased connection pool limits for localhost testing + // and high-concurrency production scenarios + let http_client = reqwest::Client::builder() + .pool_max_idle_per_host(500) // Allow 500 idle connections per host (for localhost testing) + .pool_idle_timeout(Duration::from_secs(90)) + .timeout(Duration::from_secs(10)) + .build() + .context("Failed to create HTTP client")?; + Ok(AppState { aggregator_metrics: metrics, grpc_client, + http_client, committee_members: Arc::new(RwLock::new(members)), threshold: Arc::new(RwLock::new(threshold)), options, @@ -841,10 +854,17 @@ mod tests { let registry = Registry::new(); let metrics = Arc::new(AggregatorMetrics::new(®istry)); let grpc_client = SuiGrpcClient::new(options.node_url()).unwrap(); + let http_client = reqwest::Client::builder() + .pool_max_idle_per_host(500) + .pool_idle_timeout(Duration::from_secs(90)) + .timeout(Duration::from_secs(10)) + .build() + .unwrap(); AppState { aggregator_metrics: metrics, grpc_client, + http_client, threshold: Arc::new(RwLock::new(threshold)), committee_members: Arc::new(RwLock::new(VecMap(SuiVecMap { contents: committee_contents, diff --git a/crates/key-server/src/cache.rs b/crates/key-server/src/cache.rs index 
32876806d..504ad6bd3 100644 --- a/crates/key-server/src/cache.rs +++ b/crates/key-server/src/cache.rs @@ -6,7 +6,7 @@ use moka::sync::Cache; use std::hash::Hash; use std::time::Duration; -pub(crate) const DEFAULT_SIZE: u64 = 1000; +pub(crate) const DEFAULT_SIZE: u64 = 1000; pub(crate) const DEFAULT_TTL_IN_MILLIS: u64 = 60 * 60 * 1000; // 1 hour /// Creates a new thread-safe LRU cache with the specified TTL and size. diff --git a/scripts/start-committee-20-servers.sh b/scripts/start-committee-20-servers.sh new file mode 100755 index 000000000..93182a15b --- /dev/null +++ b/scripts/start-committee-20-servers.sh @@ -0,0 +1,318 @@ +#!/bin/bash +# Script to start 20 committee key servers and 1 aggregator server +# +# Usage: +# ./start-committee-20-servers.sh +# ./start-committee-20-servers.sh stop +# +# Example: +# ./start-committee-20-servers.sh 0 0x8a0e2e09a4c5255336d234b11014642b350634f07d07df6fc4c17bf07430c872 \ +# "0x0cef...:0x7239...:2024,0x15ef...:0x5ae8...:2025,..." + +# Change to the seal repository root directory (where Cargo.toml is) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SEAL_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +cd "$SEAL_ROOT" || exit 1 + +echo "Running from: $(pwd)" +echo "" + +# Check for help command +if [[ "$1" == "help" || "$1" == "--help" || "$1" == "-h" ]]; then + echo "Usage: $0 [COMMAND|VERSION KEY_SERVER_OBJ_ID SERVER_LIST]" + echo "" + echo "Commands:" + echo " status Check status of all running servers" + echo " stop Stop all running servers" + echo " help Show this help message" + echo "" + echo "Parameters:" + echo " VERSION Master share version (e.g., 0, 1, 2)" + echo " KEY_SERVER_OBJ_ID Key server object ID (0x...)" + echo " SERVER_LIST Comma-separated list: member_address:master_share:port" + echo "" + echo "Example:" + echo " $0 0 0x8a0e2e09a4c5255336d234b11014642b350634f07d07df6fc4c17bf07430c872 \\" + echo " \"0x0cef...:0x7239...:2024,0x15ef...:0x5ae8...:2025,0x271c...:0x4f6e...:2026\"" + echo "" + echo "Default server ports:" + echo " Key Servers 0-19: http://localhost:2024-2043" + echo " Aggregator: http://localhost:3000" + exit 0 +fi + +# Check for status command +if [[ "$1" == "status" || "$1" == "--status" ]]; then + echo "Checking server status..." 
+ echo "" + + # Check aggregator + echo "=== Aggregator ===" + AGGREGATOR_UP=false + if curl -s -f http://localhost:3000/health > /dev/null 2>&1; then + echo "✓ Aggregator (http://localhost:3000) - UP" + AGGREGATOR_UP=true + else + echo "✗ Aggregator (http://localhost:3000) - DOWN" + fi + echo "" + + # Check all 20 key servers + echo "=== Key Servers ===" + UP_COUNT=0 + DOWN_COUNT=0 + + for port in {2024..2043}; do + i=$((port - 2024)) + if curl -s -f "http://localhost:${port}/health" > /dev/null 2>&1; then + echo "✓ Server ${i} (http://localhost:${port}) - UP" + ((UP_COUNT++)) + else + echo "✗ Server ${i} (http://localhost:${port}) - DOWN" + ((DOWN_COUNT++)) + fi + done + + echo "" + echo "=== Summary ===" + TOTAL_UP=$UP_COUNT + if [ "$AGGREGATOR_UP" = true ]; then + ((TOTAL_UP++)) + fi + echo "Total Servers UP: ${TOTAL_UP}/21 (1 Aggregator + 20 Key Servers)" + echo "Key Servers UP: ${UP_COUNT}/20" + echo "Key Servers DOWN: ${DOWN_COUNT}/20" + + if [ "$AGGREGATOR_UP" = true ] && [ $DOWN_COUNT -eq 0 ]; then + echo "✓ All servers are running!" + exit 0 + else + echo "⚠ Some servers are not responding" + exit 1 + fi +fi + +# Check for stop command +if [[ "$1" == "stop" || "$1" == "--stop" ]]; then + echo "Stopping all servers..." + pkill -9 -f "key-server|aggregator-server" 2>/dev/null || true + + # Kill any processes on the ports we use + for port in {2024..2043} 3000; do + lsof -ti:$port | xargs kill -9 2>/dev/null || true + done + + echo "All servers stopped." + exit 0 +fi + +# Cleanup any existing servers +echo "Cleaning up existing servers..." +pkill -9 -f "key-server|aggregator-server" 2>/dev/null || true +sleep 2 + +# Kill any processes on the ports we need +for port in {2024..2043} 3000; do + lsof -ti:$port | xargs kill -9 2>/dev/null || true +done +sleep 1 +echo "Cleanup complete." 
+echo "" + +# Parse arguments or use defaults from the 20/20 committee setup +VERSION="${1:-0}" +KEY_SERVER_OBJ_ID="${2:-0x8a0e2e09a4c5255336d234b11014642b350634f07d07df6fc4c17bf07430c872}" +SERVER_LIST="${3:-0x0ceffe3ba385abe7a11f535e3428ae2ff4508eb4b6d370b829318b0d901c1152:0x72392027c9c324a304cc35f13b6e2e66a6bc6f6bc9845fb78708745c584e5914:2024,0x15ef03731c612580a2c604696da996641d0426c80a202b92f97beb7e55ceccae:0x5ae82e12772fb102a0d6d91668a5799766093c40e9ba48493969330aa180fdb2:2025,0x271c3b8f29569652d20b9a18b1de628dae8bae43d46ba6c7b43968451710bae3:0x4f6ebd09abd1955e430d5aa6ab63437336a2672eabf4dff9ca98fe84808ce90c:2026,0x3c703fcef8a951d873e8347d1cdef0cd27e67323b6a1224f82d36d9ba2646269:0x67befc5bf47ce4765cdd4bc1045f9e1882e4078fdfaa02381c729e1c4d8077ff:2027,0x3fedeecd471d5bcba57eb542ed9dffe6556cb39ff2a96903d176343720271783:0x1ef21ba47fd1a8ba7852ee921d7af4c657b5e5ac4446ead4aa24e2e896106888:2028,0x46d8593c7f4e166c1eb9b8a710c573b6a4a68fc8336c5502a378491294b33d41:0x412e9d0de23a49d8afb3cfae02aac6f51abee9f5fd1e7d172f59b7d6a09e9563:2029,0x4c337121425049467206484b28986b9a2451b75367ed535a7685f047efb08888:0x696547a6e8e64fdd4b2d90360fad84d9bd07f3e5d8a2ef371e9a5f0aa74cb5df:2030,0x4cfc213c9d593493f207d085ae471b2a8f92e27338e5081bee72ec7d6d3f9a80:0x13e810a4d5679643e63a1c61ecba33734200790770bf332c6a5840200ac35bfd:2031,0x517024b07435eb2466d50226070af215932e6b7bc1d11f5f9a871835fa0f5404:0x63f17cccb11786a3be809c2a11c50b51cb06c2ac17134a97c898c5c6c738336c:2032,0x522c2901051b29dbe369fee0df577fb55bf23beb261d8c2c6ae9c032f778782d:0x6ae8460789371098a09af19c8477b8c5bcdf3edd8f4f0905048c48fa4259d8c1:2033,0x53dbf0f11dab2b549f41d22df1e1e627e5fb30c4fe8b38cf8edd0324738998b7:0x0eb1680f1e292b1542b1804e15bd79b5146d3e3f598f64f2922a734a88b87ffe:2034,0x58ced33704a6bf48c22782ef577c74234732c11e53a556e062feb6d79eff1b74:0x69e365cdda5361dd3fb591d9793564bd9d91f09e2c4bb13bff36fee0cb6c670d:2035,0x65db887e741dcc283bccf9ba2e0c9299703bc396126ba672ba5d7c19d0a26a2c:0x0051c865dc47fa9b25622e891c559db42d1645427a69e42858c309
a0c444418f:2036,0x79a81c8f8838c0e52819445f2aeaf50be535f9eb62461aa5120b0d423e463b67:0x5e46729d754ffa07d3ddf3f3a73aee8083c0a819588e932391ca88d9a3ce3ce6:2037,0x7c92760e35a6019fd22d184f50a17cde561ec56975525e00f8765ddfa7f313d2:0x3efd8e29656e3a692af13635e90c634d402b398e685a5c785035e7751621b8af:2038,0x80495858eb94281f1f2c90536e2d77cc56e473d686ffe9c308b319a665288531:0x337b30b1688cecb48d6f4002a3e4077ff300ac2ebb3fbe7f61fc6f252b5e1c33:2039,0x979578f86bac45626ec7696054bf9a7505545d03f9d373671c2fcfdfd9da35b3:0x110111312248b4c0c796c523381fe1956e8aa382002c310c24ad1871a5530775:2040,0xa2aea96e9d16f026074456dee88a8b02bd68f2ce0d12997614424d6ecb8182ce:0x6c9b5a9555213cdef7ce74156bdc8b5f519597fa47b32535ea2f353cf5ef3c79:2041,0xaf31a3fbd658163f03741890b8ca1189b5c73e9ad8be9b95a0fe697f237062b2:0x480d2104b2cae45d84568f0ef688cf35bea3448291a2e233f6544449e954d89f:2042,0xc1ba9b2a4608c387a168610f586ccbe45e21aea2d614cc2f0263742dffe88089:0x5c385b07a73fca4812765fb09b42e32c5d56179d7fdd7efd98116050d2d0e869:2043}" +NETWORK="Testnet" + +# Parse server list into arrays +IFS=',' read -ra SERVERS <<< "$SERVER_LIST" +MEMBER_ADDRS=() +MASTER_SHARES=() +PORTS=() + +for server_entry in "${SERVERS[@]}"; do + IFS=':' read -ra PARTS <<< "$server_entry" + if [ ${#PARTS[@]} -ne 3 ]; then + echo "Error: Invalid server entry format: $server_entry" + echo "Expected format: member_address:master_share:port" + exit 1 + fi + MEMBER_ADDRS+=("${PARTS[0]}") + MASTER_SHARES+=("${PARTS[1]}") + PORTS+=("${PARTS[2]}") +done + +NUM_SERVERS=${#MEMBER_ADDRS[@]} +echo "Configuration:" +echo " Version: V${VERSION}" +echo " Key Server Object ID: ${KEY_SERVER_OBJ_ID}" +echo " Number of servers: ${NUM_SERVERS}" +echo " Committee ID: 0x289302c9f01a2a828947f1e27cd98438b8da9c9a5cced2a74c36057593d398b5" +echo "" + +# Create config files +echo "Creating config files..." 
+ +for i in "${!MEMBER_ADDRS[@]}"; do + METRICS_PORT=$((9184 + i)) + cat > crates/key-server/key-server-config-${i}.local.yaml < crates/key-server/src/aggregator/aggregator-config-test.local.yaml <&1 | sed "s/^/[SERVER-${i}] /" & + + PID=$! + SERVER_PIDS+=($PID) + echo "Server ${i} started on http://localhost:${PORT} (PID: ${PID})" +done + +# Start aggregator server on port 3000 +RUST_LOG=info \ + PORT=3000 \ + CONFIG_PATH=crates/key-server/src/aggregator/aggregator-config-test.local.yaml \ + ./target/release/aggregator-server 2>&1 | sed 's/^/[AGGREGATOR] /' & +PID_AGG=$! +echo "Aggregator started on http://localhost:3000 (PID: $PID_AGG)" + +echo "" +echo "All servers started!" +for i in "${!MEMBER_ADDRS[@]}"; do + echo " - Key Server ${i}: http://localhost:${PORTS[$i]} (PID: ${SERVER_PIDS[$i]})" +done +echo " - Aggregator: http://localhost:3000 (PID: $PID_AGG)" +echo "" +echo "Waiting for servers to be ready..." + +# Function to check health endpoint +check_health() { + local port=$1 + local name=$2 + local max_attempts=60 + local attempt=0 + + while [ $attempt -lt $max_attempts ]; do + if curl -s -f "http://localhost:$port/health" > /dev/null 2>&1; then + echo "✓ $name is ready" + return 0 + fi + attempt=$((attempt + 1)) + sleep 1 + done + + echo "✗ $name failed to start (timeout after ${max_attempts}s)" + return 1 +} + +# Check all key servers +for i in "${!PORTS[@]}"; do + check_health "${PORTS[$i]}" "Key Server ${i}" & +done + +# Check aggregator +check_health 3000 "Aggregator" & + +# Wait for all health checks to complete +wait + +echo "" +echo "All servers are ready!" 
+echo "" +echo "To stop all servers, run:" +echo " ./scripts/start-committee-20-servers.sh stop" +echo "" +echo "Or manually:" +ALL_PIDS="${SERVER_PIDS[@]} $PID_AGG" +echo " kill $ALL_PIDS" diff --git a/tests/concurrent-decrypt-test.ts b/tests/concurrent-decrypt-test.ts new file mode 100644 index 000000000..3f4c0d34c --- /dev/null +++ b/tests/concurrent-decrypt-test.ts @@ -0,0 +1,304 @@ +// Copyright (c), Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { fromHex } from "@mysten/bcs"; +import { Ed25519Keypair } from "@mysten/sui/keypairs/ed25519"; +import { Transaction } from "@mysten/sui/transactions"; +import { getFullnodeUrl, SuiClient } from "@mysten/sui/client"; +import { SealClient, SessionKey } from "@mysten/seal"; +import assert from "assert"; +import { parseArgs } from "node:util"; + +const PACKAGE_ID = "0x9e96366200f7811e52e741e395c200d280886913d40114d5c59b1e5ed8c3733a"; + +interface DecryptResult { + callId: number; + decryptTimeMs: number; + success: boolean; + error?: string; +} + +// Helper function to generate random data +function generateRandomData(size: number): Uint8Array { + const MAX_CHUNK_SIZE = 65536; + const data = new Uint8Array(size); + + for (let offset = 0; offset < size; offset += MAX_CHUNK_SIZE) { + const chunkSize = Math.min(MAX_CHUNK_SIZE, size - offset); + crypto.getRandomValues(data.subarray(offset, offset + chunkSize)); + } + + return data; +} + +async function runConcurrentDecryptTest( + network: "testnet" | "mainnet", + serverConfigs: Array<{ + objectId: string; + aggregatorUrl?: string; + apiKeyName?: string; + apiKey?: string; + weight: number; + }>, + threshold: number, + concurrentCalls: number = 20, +): Promise { + // Setup + const keypair = Ed25519Keypair.generate(); + const suiAddress = keypair.getPublicKey().toSuiAddress(); + const suiClient = new SuiClient({ url: getFullnodeUrl(network) }); + + console.log(`Test address: ${suiAddress}`); + console.log(`Network: ${network}`); + console.log(`Threshold: 
${threshold}/${serverConfigs.length}`); + console.log(`Concurrent calls: ${concurrentCalls}`); + console.log("---\n"); + + // Create client + const client = new SealClient({ + suiClient, + serverConfigs, + verifyKeyServers: false, + }); + + // Generate 1KB test data + console.log("Generating test data (1KB)..."); + const testData = generateRandomData(1024); + console.log("Test data generated\n"); + + // Encrypt once to get encrypted data to use for all decrypt calls + console.log("Encrypting data..."); + const { encryptedObject: encryptedBytes } = await client.encrypt({ + threshold, + packageId: PACKAGE_ID, + id: suiAddress, + data: testData, + }); + console.log("Encryption complete\n"); + + // Create session key for decryption + console.log("Creating session key..."); + const sessionKey = await SessionKey.create({ + address: suiAddress, + packageId: PACKAGE_ID, + ttlMin: 10, + signer: keypair, + suiClient, + }); + console.log("Session key created\n"); + + // Construct transaction bytes for seal_approve + const tx = new Transaction(); + const keyIdArg = tx.pure.vector("u8", fromHex(suiAddress)); + tx.moveCall({ + target: `${PACKAGE_ID}::account_based::seal_approve`, + arguments: [keyIdArg], + }); + const txBytes = await tx.build({ + client: suiClient, + onlyTransactionKind: true, + }); + + // Perform concurrent decrypt calls + console.log(`Starting ${concurrentCalls} concurrent decrypt calls...\n`); + + const decryptPromises = Array.from({ length: concurrentCalls }, (_, i) => + (async (): Promise => { + const callId = i + 1; + try { + const decryptStart = performance.now(); + const decryptedData = await client.decrypt({ + data: encryptedBytes, + sessionKey, + txBytes, + }); + const decryptEnd = performance.now(); + const decryptTimeMs = decryptEnd - decryptStart; + + // Verify data integrity + assert.deepEqual(decryptedData, testData); + + console.log(`Call ${callId}: ${decryptTimeMs.toFixed(2)}ms ✅`); + + return { + callId, + decryptTimeMs, + success: true, + }; + 
} catch (error) { + console.error(`Call ${callId}: FAILED ❌`, error); + return { + callId, + decryptTimeMs: 0, + success: false, + error: error instanceof Error ? error.message : String(error), + }; + } + })(), + ); + + const results = await Promise.all(decryptPromises); + + return results; +} + +function printResults(results: DecryptResult[]) { + console.log("\n"); + console.log("=".repeat(80)); + console.log("CONCURRENT DECRYPT TEST RESULTS"); + console.log("=".repeat(80)); + console.log(); + + const successfulResults = results.filter((r) => r.success); + const failedResults = results.filter((r) => !r.success); + + if (successfulResults.length > 0) { + console.log("Successful Decrypts:"); + console.log("Call ID Decrypt Time"); + console.log("-".repeat(40)); + + for (const result of successfulResults) { + console.log( + `${result.callId.toString().padStart(7)} ${result.decryptTimeMs.toFixed(2).padStart(12)}ms`, + ); + } + + // Calculate statistics + const times = successfulResults.map((r) => r.decryptTimeMs); + const avg = times.reduce((a, b) => a + b, 0) / times.length; + const min = Math.min(...times); + const max = Math.max(...times); + const median = times.sort((a, b) => a - b)[Math.floor(times.length / 2)]; + + console.log("\nStatistics:"); + console.log(` Total calls: ${results.length}`); + console.log(` Successful: ${successfulResults.length}`); + console.log(` Failed: ${failedResults.length}`); + console.log(` Average time: ${avg.toFixed(2)}ms`); + console.log(` Median time: ${median.toFixed(2)}ms`); + console.log(` Min time: ${min.toFixed(2)}ms`); + console.log(` Max time: ${max.toFixed(2)}ms`); + console.log(` Range: ${(max - min).toFixed(2)}ms`); + } + + if (failedResults.length > 0) { + console.log("\nFailed Decrypts:"); + for (const result of failedResults) { + console.log(` Call ${result.callId}: ${result.error}`); + } + } +} + +// Parse command line arguments +const args = process.argv.slice(2).filter((arg) => arg !== "--"); + +const { values } = 
parseArgs({ + args, + options: { + network: { + type: "string", + default: "testnet", + }, + servers: { + type: "string", + }, + threshold: { + type: "string", + }, + concurrent: { + type: "string", + default: "20", + }, + }, +}); + +const network = values.network as "testnet" | "mainnet"; +if (network !== "testnet" && network !== "mainnet") { + console.error('Error: network must be either "testnet" or "mainnet"'); + process.exit(1); +} + +// Parse servers (JSON format) +if (!values.servers) { + console.error("Error: --servers is required"); + console.error( + 'Example: --servers \'[{"objectId":"0x8a0e2e09a4c5255336d234b11014642b350634f07d07df6fc4c17bf07430c872","aggregatorUrl":"http://localhost:3000"}]\' --threshold 11', + ); + process.exit(1); +} + +type ServerConfig = { + objectId: string; + aggregatorUrl?: string; + apiKeyName?: string; + apiKey?: string; + weight?: number; +}; + +let serverConfigs: ServerConfig[]; + +try { + serverConfigs = JSON.parse(values.servers); + if (!Array.isArray(serverConfigs) || serverConfigs.length === 0) { + console.error("Error: servers must be a non-empty JSON array"); + process.exit(1); + } + for (const config of serverConfigs) { + if (!config.objectId) { + console.error("Error: each server must have an objectId"); + process.exit(1); + } + } +} catch (error) { + console.error("Error: failed to parse servers JSON:", error); + process.exit(1); +} + +// Build server configs with weights +const serverConfigsWithWeights = serverConfigs.map((config) => ({ + ...config, + weight: config.weight ?? 
1, +})); + +// Parse threshold (default to number of servers) +let threshold: number; +if (values.threshold) { + threshold = parseInt(values.threshold, 10); + if (isNaN(threshold) || threshold <= 0) { + console.error("Invalid threshold."); + process.exit(1); + } + if (threshold > serverConfigsWithWeights.length) { + console.error( + `Error: threshold (${threshold}) cannot exceed number of servers (${serverConfigsWithWeights.length})`, + ); + process.exit(1); + } +} else { + threshold = serverConfigsWithWeights.length; + console.log(`Using default threshold: ${threshold} (number of servers)`); +} + +// Parse concurrent calls +const concurrentCalls = parseInt(values.concurrent!, 10); +if (isNaN(concurrentCalls) || concurrentCalls <= 0) { + console.error("Invalid concurrent calls value."); + process.exit(1); +} + +runConcurrentDecryptTest(network, serverConfigsWithWeights, threshold, concurrentCalls) + .then((results) => { + printResults(results); + const failedCount = results.filter((r) => !r.success).length; + if (failedCount === 0) { + console.log("\n✅ All concurrent decrypt calls completed successfully!"); + process.exit(0); + } else { + console.log(`\n⚠️ ${failedCount} out of ${results.length} calls failed.`); + process.exit(1); + } + }) + .catch((error) => { + console.error("Test failed:", error); + process.exit(1); + }); diff --git a/tests/load-test.ts b/tests/load-test.ts new file mode 100644 index 000000000..6349e6bcc --- /dev/null +++ b/tests/load-test.ts @@ -0,0 +1,329 @@ +// Copyright (c), Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +import { fromHex } from "@mysten/bcs"; +import { Ed25519Keypair } from "@mysten/sui/keypairs/ed25519"; +import { Transaction } from "@mysten/sui/transactions"; +import { getFullnodeUrl, SuiClient } from "@mysten/sui/client"; +import { SealClient, SessionKey } from "@mysten/seal"; +import assert from "assert"; +import { parseArgs } from "node:util"; + +// Package IDs for different data sizes +const PACKAGE_IDS = { + "1kb": "0x9e96366200f7811e52e741e395c200d280886913d40114d5c59b1e5ed8c3733a", + "1mb": "0x31fc37bfd6c585a5a23bdecfadf4d51443ff41415dcaf9955ae6a1096acc8768", + "10mb": "0xe57b5db1b69f9e01de7c23209583c1c8c74b7a53d5144fa3a92a068a6e2fc922", + "100mb": "0xacd083a6ab624b73d87052bd1b91cdfe2327485890fa977437653a8bace50358", +}; + +type DataSize = keyof typeof PACKAGE_IDS; + +interface TestResult { + size: DataSize; + packageId: string; + encryptTimeMs: number; + decryptTimeMs: number; + encryptMin: number; + encryptMax: number; + decryptMin: number; + decryptMax: number; + runs: number; +} + +// Helper function to generate random data larger than crypto.getRandomValues limit (65536 bytes) +function generateRandomData(size: number): Uint8Array { + const MAX_CHUNK_SIZE = 65536; + const data = new Uint8Array(size); + + for (let offset = 0; offset < size; offset += MAX_CHUNK_SIZE) { + const chunkSize = Math.min(MAX_CHUNK_SIZE, size - offset); + crypto.getRandomValues(data.subarray(offset, offset + chunkSize)); + } + + return data; +} + +async function runLoadTest( + network: "testnet" | "mainnet", + serverConfigs: Array<{ + objectId: string; + aggregatorUrl?: string; + apiKeyName?: string; + apiKey?: string; + weight: number; + }>, + threshold: number, + iterations: number = 3, +): Promise { + const results: TestResult[] = []; + + // Generate test data for each size (before any SDK timing) + console.log("Generating test data..."); + const dataGenStart = performance.now(); + const dataSizes: Record = { + "1kb": 
generateRandomData(1024), + "1mb": generateRandomData(1024 * 1024), + "10mb": generateRandomData(10 * 1024 * 1024), + "100mb": generateRandomData(100 * 1024 * 1024), + }; + const dataGenEnd = performance.now(); + console.log( + `Test data generated in ${(dataGenEnd - dataGenStart).toFixed(2)}ms (not included in SDK timing)\n`, + ); + + // Setup + const keypair = Ed25519Keypair.generate(); + const suiAddress = keypair.getPublicKey().toSuiAddress(); + const suiClient = new SuiClient({ url: getFullnodeUrl(network) }); + + console.log(`Test address: ${suiAddress}`); + console.log(`Network: ${network}`); + console.log(`Threshold: ${threshold}/${serverConfigs.length}`); + console.log("---"); + + // Create client + const client = new SealClient({ + suiClient, + serverConfigs, + verifyKeyServers: false, + }); + + // Warm-up run to establish connections and eliminate cold start overhead + console.log("\nPerforming warm-up run..."); + const warmupData = generateRandomData(1024); + try { + await client.encrypt({ + threshold, + packageId: PACKAGE_IDS["1kb"], + id: suiAddress, + data: warmupData, + }); + console.log("Warm-up complete\n"); + } catch (error) { + console.log("Warm-up failed (continuing anyway):", error); + } + + // Test each data size with its corresponding package ID - run multiple iterations + const sizes = Object.keys(PACKAGE_IDS) as DataSize[]; + + for (const size of sizes) { + const packageId = PACKAGE_IDS[size]; + const testData = dataSizes[size]; + + console.log( + `\nTesting ${size} (${testData.length.toLocaleString()} bytes) - ${iterations} iterations`, + ); + + const encryptTimes: number[] = []; + const decryptTimes: number[] = []; + + for (let i = 0; i < iterations; i++) { + console.log(` Run ${i + 1}/${iterations}:`); + + // Encrypt + const encryptStart = performance.now(); + const { encryptedObject: encryptedBytes } = await client.encrypt({ + threshold, + packageId, + id: suiAddress, + data: testData, + }); + const encryptEnd = performance.now(); + 
const encryptTimeMs = encryptEnd - encryptStart; + encryptTimes.push(encryptTimeMs); + console.log(` Encrypt: ${encryptTimeMs.toFixed(2)}ms`); + + // Create session key for decryption + const sessionKey = await SessionKey.create({ + address: suiAddress, + packageId, + ttlMin: 10, + signer: keypair, + suiClient, + }); + + // Construct transaction bytes for seal_approve + const tx = new Transaction(); + const keyIdArg = tx.pure.vector("u8", fromHex(suiAddress)); + tx.moveCall({ + target: `${packageId}::account_based::seal_approve`, + arguments: [keyIdArg], + }); + const txBytes = await tx.build({ + client: suiClient, + onlyTransactionKind: true, + }); + + // Decrypt + const decryptStart = performance.now(); + const decryptedData = await client.decrypt({ + data: encryptedBytes, + sessionKey, + txBytes, + }); + const decryptEnd = performance.now(); + const decryptTimeMs = decryptEnd - decryptStart; + decryptTimes.push(decryptTimeMs); + console.log(` Decrypt: ${decryptTimeMs.toFixed(2)}ms`); + + // Verify data integrity + assert.deepEqual(decryptedData, testData); + console.log(` ✅ Verified`); + } + + // Calculate statistics + const encryptAvg = encryptTimes.reduce((a, b) => a + b, 0) / iterations; + const decryptAvg = decryptTimes.reduce((a, b) => a + b, 0) / iterations; + + console.log(` Summary: Encrypt avg=${encryptAvg.toFixed(2)}ms, Decrypt avg=${decryptAvg.toFixed(2)}ms`); + + results.push({ + size, + packageId, + encryptTimeMs: encryptAvg, + decryptTimeMs: decryptAvg, + encryptMin: Math.min(...encryptTimes), + encryptMax: Math.max(...encryptTimes), + decryptMin: Math.min(...decryptTimes), + decryptMax: Math.max(...decryptTimes), + runs: iterations, + }); + } + + return results; +} + +function printResults(results: TestResult[]) { + console.log("\n"); + console.log("=".repeat(120)); + console.log("LOAD TEST RESULTS"); + console.log("=".repeat(120)); + console.log(); + + console.log( + "Size Encrypt (avg) Encrypt (min-max) Decrypt (avg) Decrypt (min-max) Runs", + 
); + console.log("-".repeat(120)); + + for (const result of results) { + const encryptRange = `${result.encryptMin.toFixed(2)}-${result.encryptMax.toFixed(2)}`; + const decryptRange = `${result.decryptMin.toFixed(2)}-${result.decryptMax.toFixed(2)}`; + console.log( + `${result.size.padEnd(8)} ${result.encryptTimeMs.toFixed(2).padStart(12)}ms ${encryptRange.padStart(16)}ms ${result.decryptTimeMs.toFixed(2).padStart(12)}ms ${decryptRange.padStart(16)}ms ${result.runs.toString().padStart(5)}`, + ); + } + + console.log("\nPackage IDs:"); + for (const result of results) { + console.log(` ${result.size.padEnd(8)} ${result.packageId}`); + } +} + +// Parse command line arguments +const args = process.argv.slice(2).filter((arg) => arg !== "--"); + +const { values } = parseArgs({ + args, + options: { + network: { + type: "string", + default: "testnet", + }, + servers: { + type: "string", + }, + threshold: { + type: "string", + }, + iterations: { + type: "string", + default: "3", + }, + }, +}); + +const network = values.network as "testnet" | "mainnet"; +if (network !== "testnet" && network !== "mainnet") { + console.error('Error: network must be either "testnet" or "mainnet"'); + process.exit(1); +} + +// Parse servers (JSON format) +if (!values.servers) { + console.error("Error: --servers is required"); + console.error( + 'Example: --servers \'[{"objectId":"0x8a0e2e09a4c5255336d234b11014642b350634f07d07df6fc4c17bf07430c872","aggregatorUrl":"http://localhost:3000"}]\' --threshold 11', + ); + process.exit(1); +} + +type ServerConfig = { + objectId: string; + aggregatorUrl?: string; + apiKeyName?: string; + apiKey?: string; + weight?: number; +}; + +let serverConfigs: ServerConfig[]; + +try { + serverConfigs = JSON.parse(values.servers); + if (!Array.isArray(serverConfigs) || serverConfigs.length === 0) { + console.error("Error: servers must be a non-empty JSON array"); + process.exit(1); + } + for (const config of serverConfigs) { + if (!config.objectId) { + 
console.error("Error: each server must have an objectId"); + process.exit(1); + } + } +} catch (error) { + console.error("Error: failed to parse servers JSON:", error); + process.exit(1); +} + +// Build server configs with weights +const serverConfigsWithWeights = serverConfigs.map((config) => ({ + ...config, + weight: config.weight ?? 1, +})); + +// Parse threshold (default to number of servers) +let threshold: number; +if (values.threshold) { + threshold = parseInt(values.threshold, 10); + if (isNaN(threshold) || threshold <= 0) { + console.error("Invalid threshold."); + process.exit(1); + } + if (threshold > serverConfigsWithWeights.length) { + console.error( + `Error: threshold (${threshold}) cannot exceed number of servers (${serverConfigsWithWeights.length})`, + ); + process.exit(1); + } +} else { + threshold = serverConfigsWithWeights.length; + console.log(`Using default threshold: ${threshold} (number of servers)`); +} + +// Parse iterations (default to 3) +const iterations = parseInt(values.iterations!, 10); +if (isNaN(iterations) || iterations <= 0) { + console.error("Invalid iterations."); + process.exit(1); +} + +runLoadTest(network, serverConfigsWithWeights, threshold, iterations) + .then((results) => { + printResults(results); + console.log("\n✅ Load test completed successfully!"); + process.exit(0); + }) + .catch((error) => { + console.error("Load test failed:", error); + process.exit(1); + }); diff --git a/tests/package.json b/tests/package.json index f9ec8b6d8..f223480ad 100644 --- a/tests/package.json +++ b/tests/package.json @@ -6,6 +6,7 @@ "type": "module", "scripts": { "test": "tsx simple-seal-server.test.ts", + "load-test": "tsx load-test.ts", "prettier:check": "prettier -c --ignore-unknown .", "prettier:fix": "prettier -w --ignore-unknown .", "lint": "pnpm run prettier:check",