From 0a722b3e7d43aa5553c737c3f9cd7d55ff925c75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 16 Oct 2024 08:25:14 -0600 Subject: [PATCH 01/25] build: run redis tests using a docker integration environment (#655) Changes how redis tests are performed so they run with a dockerized integration environment instead of relying on a local `redis-server` install controlled via Rust code. Adds tasks and launch configs to VScode so they are easier to launch and adjusts the CI to use it as well. --- .cargo/config.toml | 3 + .github/workflows/ci.yaml | 21 ++-- .gitignore | 5 +- .vscode/launch.json | 47 ++++++++ .vscode/tasks.json | 47 ++++++++ .../src/service/tests/helpers/mock_service.rs | 34 ++---- .../chainhook-cli/src/service/tests/mod.rs | 101 ++++++++---------- .../src/service/tests/observer_tests.rs | 16 ++- dockerfiles/docker-compose.dev.yml | 5 + 9 files changed, 176 insertions(+), 103 deletions(-) create mode 100644 .vscode/launch.json create mode 100644 .vscode/tasks.json create mode 100644 dockerfiles/docker-compose.dev.yml diff --git a/.cargo/config.toml b/.cargo/config.toml index 64b4ad5f5..66931ba61 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,2 +1,5 @@ [alias] chainhook-install = "install --path components/chainhook-cli --locked --force --features cli --features debug --no-default-features" + +[env] +RUST_TEST_THREADS = "1" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c947d18c1..fffd5a6ab 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -35,12 +35,6 @@ jobs: rustup toolchain install stable --profile minimal echo "RUST_VERSION_HASH=$(rustc --version | sha256sum | awk '{print $1}')" >> $GITHUB_ENV - - name: Install redis - if: matrix.suite == 'cli' - run: | - sudo apt-get install -y redis-server - echo "TARPAULIN_FLAGS=--features redis_tests" >> $GITHUB_ENV - - name: Cache cargo uses: actions/cache@v4 with: @@ -59,6 +53,13 @@ jobs: run: | cargo install 
cargo-tarpaulin + - name: Setup integration environment + run: | + sudo ufw disable + docker compose -f ../../dockerfiles/docker-compose.dev.yml up -d + docker compose -f ../../dockerfiles/docker-compose.dev.yml logs -t -f --no-color &> docker-compose-logs.txt & + if: matrix.suite == 'cli' + - name: Run tests run: | cargo tarpaulin --skip-clean --out lcov ${{ env.TARPAULIN_FLAGS }} -- --test-threads=1 @@ -69,6 +70,14 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} codecov_yml_path: .github/codecov.yml + - name: Print integration environment logs + run: cat docker-compose-logs.txt + if: matrix.suite == 'cli' && failure() + + - name: Teardown integration environment + run: docker compose -f ../../dockerfiles/docker-compose.dev.yml down -v -t 0 + if: matrix.suite == 'cli' && always() + distributions: runs-on: ${{ matrix.os }} diff --git a/.gitignore b/.gitignore index 5fa09387d..57280addb 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,5 @@ components/chainhook-types-js/dist *.redb cache/ Chainhook.toml - -components/chainhook-cli/src/service/tests/fixtures/tmp -components/chainhook-cli/src/archive/tests/fixtures/tmp \ No newline at end of file +**/src/service/tests/fixtures/tmp +**/src/archive/tests/fixtures/tmp \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 000000000..78f6662fb --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,47 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "test: chainhook-sdk", + "cargo": { + "args": ["test", "--no-run", "--lib", "--package=chainhook-sdk"], + "filter": { + "name": "chainhook_sdk", + "kind": "lib" + } + }, + "args": [], + "env": { + "RUST_TEST_THREADS": "1" + }, + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "test: chainhook-cli", + "cargo": { + "args": [ + "test", + "--no-run", + "--bin=chainhook", + "--package=chainhook", + "--features=redis_tests" + ], + 
"filter": { + "name": "chainhook", + "kind": "bin" + } + }, + "args": [], + "env": { + "RUST_TEST_THREADS": "1" + }, + "cwd": "${workspaceFolder}", + "preLaunchTask": "redis:start", + "postDebugTask": "redis:stop" + } + ] +} diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 000000000..31c49e97a --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,47 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "redis:start", + "type": "shell", + "command": "docker compose -f dockerfiles/docker-compose.dev.yml up --force-recreate -V", + "isBackground": true, + "problemMatcher": { + "pattern": { "regexp": ".", "file": 1, "location": 2, "message": 3 }, + "background": { + "activeOnStart": true, + "beginsPattern": ".", + "endsPattern": "." + } + }, + "presentation": { + "echo": true, + "reveal": "always", + "focus": false, + "panel": "dedicated", + "clear": false + } + }, + { + "label": "redis:stop", + "type": "shell", + "command": "docker compose -f dockerfiles/docker-compose.dev.yml down -v -t 0", + "isBackground": true, + "problemMatcher": { + "pattern": { "regexp": ".", "file": 1, "location": 2, "message": 3 }, + "background": { + "activeOnStart": true, + "beginsPattern": ".", + "endsPattern": "." 
+ } + }, + "presentation": { + "echo": true, + "reveal": "always", + "focus": false, + "panel": "dedicated", + "clear": false + } + } + ] +} diff --git a/components/chainhook-cli/src/service/tests/helpers/mock_service.rs b/components/chainhook-cli/src/service/tests/helpers/mock_service.rs index ae1196def..e42e39d5a 100644 --- a/components/chainhook-cli/src/service/tests/helpers/mock_service.rs +++ b/components/chainhook-cli/src/service/tests/helpers/mock_service.rs @@ -22,8 +22,6 @@ use reqwest::Method; use rocket::serde::json::Value as JsonValue; use rocket::Shutdown; use std::path::PathBuf; -use std::process::Stdio; -use std::process::{Child, Command}; use std::sync::mpsc; use std::sync::mpsc::channel; use std::sync::mpsc::Receiver; @@ -236,21 +234,16 @@ pub async fn build_predicate_api_server(port: u16) -> (Receiver (rx, shutdown) } -pub async fn start_redis(port: u16) -> Result { - let handle = Command::new("redis-server") - .arg(format!("--port {port}")) - .stdout(Stdio::null()) - .spawn() - .map_err(|e| format!("failed to create start-redis command: {}", e))?; +pub async fn wait_for_redis(port: u16) -> Result<(), String> { let mut attempts = 0; loop { match redis::Client::open(format!("redis://localhost:{port}/")) { Ok(client) => match client.get_connection() { - Ok(_) => return Ok(handle), + Ok(_) => return Ok(()), Err(e) => { attempts += 1; if attempts == 10 { - return Err(format!("failed to start redis service: {}", e)); + return Err(format!("failed to connect to redis service: {}", e)); } tokio::time::sleep(std::time::Duration::from_secs(1)).await } @@ -258,7 +251,7 @@ pub async fn start_redis(port: u16) -> Result { Err(e) => { attempts += 1; if attempts == 10 { - return Err(format!("failed to start redis service: {}", e)); + return Err(format!("failed to connect to redis service: {}", e)); } tokio::time::sleep(std::time::Duration::from_secs(1)).await } @@ -368,7 +361,6 @@ pub async fn start_chainhook_service( } pub struct TestSetupResult { - pub 
redis_process: Child, pub working_dir: String, pub chainhook_service_port: u16, pub redis_port: u16, @@ -393,7 +385,7 @@ pub async fn setup_stacks_chainhook_test( prometheus_port, ) = setup_chainhook_service_ports().unwrap_or_else(|e| panic!("test failed with error: {e}")); - let mut redis_process = start_redis(redis_port) + wait_for_redis(redis_port) .await .unwrap_or_else(|e| panic!("test failed with error: {e}")); flush_redis(redis_port); @@ -409,19 +401,16 @@ pub async fn setup_stacks_chainhook_test( let client = redis::Client::open(format!("redis://localhost:{redis_port}/")) .unwrap_or_else(|e| { flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); let mut connection = client.get_connection().unwrap_or_else(|e| { flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); let stacks_spec = predicate .into_specification_for_network(&StacksNetwork::Devnet) .unwrap_or_else(|e| { flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); @@ -432,14 +421,13 @@ pub async fn setup_stacks_chainhook_test( let (working_dir, tsv_dir) = create_tmp_working_dir().unwrap_or_else(|e| { flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); write_stacks_blocks_to_tsv(starting_chain_tip, &tsv_dir).unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); flush_redis(redis_port); - redis_process.kill().unwrap(); + // redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); @@ -459,7 +447,6 @@ pub async fn setup_stacks_chainhook_test( .unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); @@ -469,11 +456,9 @@ pub async fn setup_stacks_chainhook_test( .unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); flush_redis(redis_port); - 
redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); TestSetupResult { - redis_process, working_dir, chainhook_service_port, redis_port, @@ -495,14 +480,13 @@ pub async fn setup_bitcoin_chainhook_test(starting_chain_tip: u64) -> TestSetupR prometheus_port, ) = setup_chainhook_service_ports().unwrap_or_else(|e| panic!("test failed with error: {e}")); - let mut redis_process = start_redis(redis_port) + wait_for_redis(redis_port) .await .unwrap_or_else(|e| panic!("test failed with error: {e}")); flush_redis(redis_port); let (working_dir, tsv_dir) = create_tmp_working_dir().unwrap_or_else(|e| { flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); @@ -536,11 +520,9 @@ pub async fn setup_bitcoin_chainhook_test(starting_chain_tip: u64) -> TestSetupR .unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); flush_redis(redis_port); - redis_process.kill().unwrap(); panic!("test failed with error: {e}"); }); TestSetupResult { - redis_process, working_dir, chainhook_service_port, redis_port, @@ -553,7 +535,7 @@ pub async fn setup_bitcoin_chainhook_test(starting_chain_tip: u64) -> TestSetupR } pub fn setup_chainhook_service_ports() -> Result<(u16, u16, u16, u16, u16, u16), String> { - let redis_port = get_free_port()?; + let redis_port = 6379; let chainhook_service_port = get_free_port()?; let stacks_rpc_port = get_free_port()?; let stacks_ingestion_port = get_free_port()?; diff --git a/components/chainhook-cli/src/service/tests/mod.rs b/components/chainhook-cli/src/service/tests/mod.rs index 274ece223..2321499af 100644 --- a/components/chainhook-cli/src/service/tests/mod.rs +++ b/components/chainhook-cli/src/service/tests/mod.rs @@ -6,7 +6,6 @@ use rocket::Shutdown; use std::fs::{self}; use std::net::TcpListener; use std::path::PathBuf; -use std::process::Child; use std::thread::sleep; use std::time::Duration; use test_case::test_case; @@ -380,7 +379,6 @@ async fn 
test_stacks_predicate_status_is_updated( expected_occurrences: Option, ) -> (PredicateStatus, Option, Option) { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -401,12 +399,12 @@ async fn test_stacks_predicate_status_is_updated( ); let _ = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); await_new_scanning_status_complete(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); for i in 1..blocks_to_mine + 1 { @@ -418,22 +416,22 @@ async fn test_stacks_predicate_status_is_updated( i + starting_chain_tip + 100, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); } sleep(Duration::new(2, 0)); let result = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); let found_predicate_status = filter_predicate_status_from_all_predicates(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); assert_eq!(found_predicate_status, result); (result, expected_evaluations, expected_occurrences) } @@ -454,7 +452,6 @@ async fn test_bitcoin_predicate_status_is_updated( expected_occurrences: Option, ) -> (PredicateStatus, Option, Option) { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -478,12 +475,12 @@ async fn 
test_bitcoin_predicate_status_is_updated( let _ = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); await_new_scanning_status_complete(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); for i in 1..blocks_to_mine + 1 { @@ -494,21 +491,21 @@ async fn test_bitcoin_predicate_status_is_updated( i + starting_chain_tip, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); } sleep(Duration::new(2, 0)); let result = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); let found_predicate_status = filter_predicate_status_from_all_predicates(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); assert_eq!(found_predicate_status, result); (result, expected_evaluations, expected_occurrences) } @@ -533,7 +530,6 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( ) -> Result<(), String> { let starting_chain_tip = 0; let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -557,7 +553,7 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( let _ = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let 
genesis_branch_key = '0'; let first_block_mined_height = starting_chain_tip + 1; @@ -570,13 +566,13 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( block_height, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; } sleep(Duration::new(2, 0)); let status = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; assert_streaming_status((status, None, None)); let branch_key = '1'; @@ -590,7 +586,7 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( fork_point, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let reorg_point = last_block_mined_height + 1; let first_fork_block_mined_height = first_fork_block_mined_height + 1; @@ -604,12 +600,12 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( block_height, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; if block_height == reorg_point { sleep(Duration::new(2, 0)); let status = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; assert_streaming_status((status, None, None)); } } @@ -617,9 +613,9 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( sleep(Duration::new(2, 0)); let status = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); 
assert_confirmed_expiration_status((status, None, None)); Ok(()) } @@ -630,7 +626,6 @@ async fn test_bitcoin_predicate_status_is_updated_with_reorg( #[cfg_attr(not(feature = "redis_tests"), ignore)] async fn test_deregister_predicate(chain: Chain) -> Result<(), String> { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -665,27 +660,27 @@ async fn test_deregister_predicate(chain: Chain) -> Result<(), String> { let _ = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let result = call_get_predicate(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; assert_eq!(result.get("status"), Some(&json!(200))); let result = call_deregister_predicate(&chain, uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; assert_eq!(result.get("status"), Some(&json!(200))); let mut attempts = 0; loop { let result = call_get_predicate(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; if result.get("status") == Some(&json!(404)) { break; } else if attempts == 3 { - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); panic!("predicate was not successfully derigistered"); } else { attempts += 1; @@ -693,7 +688,7 @@ async fn test_deregister_predicate(chain: Chain) -> Result<(), String> { } } - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); Ok(()) } @@ -737,7 +732,6 @@ async fn test_restarting_with_saved_predicates( 
serde_json::from_value(predicate).expect("failed to set up stacks chanhook spec for test"); let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -751,16 +745,16 @@ async fn test_restarting_with_saved_predicates( await_new_scanning_status_complete(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); sleep(Duration::new(2, 0)); let result = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process)) + .map_err(|e| cleanup_err(e, &working_dir, redis_port)) .unwrap(); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); (result, None, None) } @@ -779,7 +773,6 @@ async fn it_allows_specifying_startup_predicate() -> Result<(), String> { serde_json::from_value(predicate).expect("failed to set up stacks chanhook spec for test"); let startup_predicate = ChainhookSpecificationNetworkMap::Stacks(predicate); let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -792,14 +785,14 @@ async fn it_allows_specifying_startup_predicate() -> Result<(), String> { await_new_scanning_status_complete(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; sleep(Duration::new(2, 0)); let result = get_predicate_status(uuid, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); assert_confirmed_expiration_status((result, None, None)); Ok(()) } @@ -820,7 +813,6 @@ async fn register_predicate_responds_409_if_uuid_in_use() -> Result<(), String> let 
startup_predicate = ChainhookSpecificationNetworkMap::Stacks(stacks_spec); let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -833,9 +825,9 @@ async fn register_predicate_responds_409_if_uuid_in_use() -> Result<(), String> let result = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); assert_eq!(result.get("status"), Some(&json!(409))); Ok(()) } @@ -859,7 +851,6 @@ fn it_generates_open_api_spec() { async fn it_seeds_block_pool_on_startup() -> Result<(), String> { let starting_chain_tip = 3; let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -880,7 +871,7 @@ async fn it_seeds_block_pool_on_startup() -> Result<(), String> { i + starting_chain_tip + 100, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; } // we need these blocks to propagate through new stacks block events and save to the db, so give it some time sleep(Duration::new(1, 0)); @@ -899,7 +890,7 @@ async fn it_seeds_block_pool_on_startup() -> Result<(), String> { let stacks_db = open_readonly_stacks_db_conn(&db_path, &ctx).expect("unable to read stacks_db"); // validate that all blocks we just mined are saved as unconfirmed blocks in the database let unconfirmed_blocks = get_all_unconfirmed_blocks(&stacks_db, &ctx) - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let mut unconfirmed_height = starting_chain_tip + 1; assert_eq!( blocks_to_mine, @@ -961,7 +952,7 @@ async fn it_seeds_block_pool_on_startup() -> Result<(), String> { next_block_height + 100, ) .await - .map_err(|e| cleanup_err(e, 
&working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; // mine the same block number we just mined, but on a different fork mine_stacks_block( @@ -972,13 +963,13 @@ async fn it_seeds_block_pool_on_startup() -> Result<(), String> { next_block_height + 100, ) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; sleep(Duration::new(1, 0)); // confirm that there was a reorg let metrics = call_ping(stacks_ingestion_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let stacks_last_reorg_data = metrics.get("stacks").unwrap().get("last_reorg").unwrap(); let applied_blocks = stacks_last_reorg_data .get("applied_blocks") @@ -990,24 +981,18 @@ async fn it_seeds_block_pool_on_startup() -> Result<(), String> { .unwrap() .as_u64() .unwrap(); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); assert_eq!(applied_blocks, 1); assert_eq!(rolled_back_blocks, 1); Ok(()) } -pub fn cleanup_err( - error: String, - working_dir: &str, - redis_port: u16, - redis_process: &mut Child, -) -> String { - cleanup(working_dir, redis_port, redis_process); +pub fn cleanup_err(error: String, working_dir: &str, redis_port: u16) -> String { + cleanup(working_dir, redis_port); format!("test failed with error: {error}") } -pub fn cleanup(working_dir: &str, redis_port: u16, redis_process: &mut Child) { +pub fn cleanup(working_dir: &str, redis_port: u16) { let _ = std::fs::remove_dir_all(working_dir); flush_redis(redis_port); - redis_process.kill().unwrap(); } diff --git a/components/chainhook-cli/src/service/tests/observer_tests.rs b/components/chainhook-cli/src/service/tests/observer_tests.rs index bfb512c03..449ee1378 100644 --- a/components/chainhook-cli/src/service/tests/observer_tests.rs +++ 
b/components/chainhook-cli/src/service/tests/observer_tests.rs @@ -30,7 +30,6 @@ use super::helpers::{ #[cfg_attr(not(feature = "redis_tests"), ignore)] async fn ping_endpoint_returns_metrics() -> Result<(), String> { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -45,12 +44,12 @@ async fn ping_endpoint_returns_metrics() -> Result<(), String> { let predicate = build_stacks_payload(Some("devnet"), None, None, None, Some(uuid)); let _ = call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; sleep(Duration::new(1, 0)); let metrics = call_ping(stacks_ingestion_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; let result = metrics .get("stacks") .unwrap() @@ -59,7 +58,7 @@ async fn ping_endpoint_returns_metrics() -> Result<(), String> { assert_eq!(result, 1); sleep(Duration::new(1, 0)); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); Ok(()) } @@ -67,7 +66,6 @@ async fn ping_endpoint_returns_metrics() -> Result<(), String> { #[cfg_attr(not(feature = "redis_tests"), ignore)] async fn prometheus_endpoint_returns_encoded_metrics() -> Result<(), String> { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port, redis_port, @@ -82,18 +80,18 @@ async fn prometheus_endpoint_returns_encoded_metrics() -> Result<(), String> { let predicate = build_stacks_payload(Some("devnet"), None, None, None, Some(uuid)); call_register_predicate(&predicate, chainhook_service_port) .await - .map_err(|e| cleanup_err(e, &working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; sleep(Duration::new(1, 0)); let metrics = call_prometheus(prometheus_port) .await - .map_err(|e| cleanup_err(e, 
&working_dir, redis_port, &mut redis_process))?; + .map_err(|e| cleanup_err(e, &working_dir, redis_port))?; const EXPECTED: &str = "# HELP chainhook_stx_registered_predicates The number of Stacks predicates that have been registered by the Chainhook node.\n# TYPE chainhook_stx_registered_predicates gauge\nchainhook_stx_registered_predicates 1\n"; assert!(metrics.contains(EXPECTED)); sleep(Duration::new(1, 0)); - cleanup(&working_dir, redis_port, &mut redis_process); + cleanup(&working_dir, redis_port); Ok(()) } @@ -130,7 +128,6 @@ async fn await_observer_started(port: u16) { #[cfg_attr(not(feature = "redis_tests"), ignore)] async fn bitcoin_rpc_requests_are_forwarded(endpoint: &str, body: Value) { let TestSetupResult { - mut redis_process, working_dir, chainhook_service_port: _, redis_port, @@ -151,7 +148,6 @@ async fn bitcoin_rpc_requests_are_forwarded(endpoint: &str, body: Value) { assert!(response.get("error").is_none()); std::fs::remove_dir_all(&working_dir).unwrap(); flush_redis(redis_port); - redis_process.kill().unwrap(); } async fn start_and_ping_event_observer(config: EventObserverConfig, ingestion_port: u16) { diff --git a/dockerfiles/docker-compose.dev.yml b/dockerfiles/docker-compose.dev.yml new file mode 100644 index 000000000..1fc01fa2e --- /dev/null +++ b/dockerfiles/docker-compose.dev.yml @@ -0,0 +1,5 @@ +services: + redis: + image: "redis:latest" + ports: + - "6379:6379" From e44d84a0d739921a5a3ccae6e9643bdb85005f71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 16 Oct 2024 08:47:38 -0600 Subject: [PATCH 02/25] feat: parse `/stackerdb_chunks` Stacks node event (#653) Starts listening to the new `/stackerdb_chunks` Stacks node event and parses the incoming message. Parsers are taken from the `stacks_codec` crate, which has been updated with the latest serializers. 
For future PRs: * Start integrating the signer messages into the predicates framework * Store signer messages in a local SQLite DB for replay * Expand unit tests to include full Stacks node integration tests --------- Co-authored-by: Matthew Little --- .github/workflows/ci.yaml | 8 +- Cargo.lock | 4 +- Cargo.toml | 2 +- components/chainhook-cli/Cargo.toml | 2 +- components/chainhook-cli/src/service/mod.rs | 10 +- components/chainhook-sdk/Cargo.toml | 1 + .../src/chainhooks/stacks/mod.rs | 5 +- components/chainhook-sdk/src/indexer/mod.rs | 26 +- .../chainhook-sdk/src/indexer/stacks/mod.rs | 259 ++++++++++++++++-- .../chainhook-sdk/src/indexer/stacks/tests.rs | 40 +++ .../indexer/tests/helpers/stacks_events.rs | 26 ++ components/chainhook-sdk/src/observer/http.rs | 46 +++- components/chainhook-sdk/src/observer/mod.rs | 5 +- components/chainhook-types-rs/src/lib.rs | 2 + components/chainhook-types-rs/src/rosetta.rs | 9 +- components/chainhook-types-rs/src/signers.rs | 96 +++++++ 16 files changed, 504 insertions(+), 37 deletions(-) create mode 100644 components/chainhook-types-rs/src/signers.rs diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index fffd5a6ab..0b30ab741 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -20,7 +20,11 @@ jobs: strategy: fail-fast: false matrix: - suite: [cli, sdk] + include: + - suite: cli + features: redis_tests + - suite: sdk + features: stacks-signers defaults: run: working-directory: ./components/chainhook-${{ matrix.suite }} @@ -62,7 +66,7 @@ jobs: - name: Run tests run: | - cargo tarpaulin --skip-clean --out lcov ${{ env.TARPAULIN_FLAGS }} -- --test-threads=1 + cargo tarpaulin --skip-clean --out lcov --features ${{ matrix.features }} -- --test-threads=1 - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 diff --git a/Cargo.lock b/Cargo.lock index 80b5f722f..17b1c93ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3538,8 +3538,8 @@ dependencies = [ [[package]] name = 
"stacks-codec" -version = "2.8.0" -source = "git+https://github.com/hirosystems/clarinet.git#b84e0d0228a2220c860150939bdc5207e8b505ff" +version = "2.7.0" +source = "git+https://github.com/hirosystems/clarinet.git?rev=3a2f9136abd85b265e538fbe51c808e9c09a06cb#3a2f9136abd85b265e538fbe51c808e9c09a06cb" dependencies = [ "clarity", "serde", diff --git a/Cargo.toml b/Cargo.toml index 04d88f5a5..2111714ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ default-members = ["components/chainhook-cli", "components/chainhook-sdk"] resolver = "2" [patch.crates-io] -stacks-codec = { git = "https://github.com/hirosystems/clarinet.git" } +stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "3a2f9136abd85b265e538fbe51c808e9c09a06cb" } diff --git a/components/chainhook-cli/Cargo.toml b/components/chainhook-cli/Cargo.toml index d26297236..ba1b4e07f 100644 --- a/components/chainhook-cli/Cargo.toml +++ b/components/chainhook-cli/Cargo.toml @@ -16,7 +16,7 @@ serde-redis = "0.12.0" hex = "0.4.3" rand = "0.8.5" chainhook-sdk = { version = "0.12.6", default-features = false, features = [ - "zeromq", + "zeromq", "stacks-signers" ], path = "../chainhook-sdk" } hiro-system-kit = "0.3.4" # hiro-system-kit = { path = "../../../clarinet/components/hiro-system-kit" } diff --git a/components/chainhook-cli/src/service/mod.rs b/components/chainhook-cli/src/service/mod.rs index 439500392..a6249a4c5 100644 --- a/components/chainhook-cli/src/service/mod.rs +++ b/components/chainhook-cli/src/service/mod.rs @@ -547,7 +547,10 @@ impl Service { }; } StacksChainEvent::ChainUpdatedWithMicroblocks(_) - | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {} + | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, + StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { + // TODO(rafaelcr): Store signer data. 
+ } }, Err(e) => { error!( @@ -615,7 +618,10 @@ impl Service { } } StacksChainEvent::ChainUpdatedWithMicroblocks(_) - | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {} + | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, + StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { + // TODO(rafaelcr): Send via HTTP payload. + }, }; update_status_from_report( Chain::Stacks, diff --git a/components/chainhook-sdk/Cargo.toml b/components/chainhook-sdk/Cargo.toml index ec6b2e6a2..0e2a28eb7 100644 --- a/components/chainhook-sdk/Cargo.toml +++ b/components/chainhook-sdk/Cargo.toml @@ -52,5 +52,6 @@ test-case = "3.1.0" [features] default = ["hiro-system-kit/log"] zeromq = ["zmq"] +stacks-signers = [] debug = ["hiro-system-kit/debug"] release = ["hiro-system-kit/release_debug", "hiro-system-kit/full_log_level_prefix"] diff --git a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs index 1b8e5bfa9..66f2205c4 100644 --- a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs +++ b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs @@ -721,7 +721,10 @@ pub fn evaluate_stacks_chainhooks_on_chain_event<'a>( }) } } - } + }, + StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { + // TODO: Support predicates to send this data + }, } ( triggered_predicates, diff --git a/components/chainhook-sdk/src/indexer/mod.rs b/components/chainhook-sdk/src/indexer/mod.rs index 737f044a8..af484d466 100644 --- a/components/chainhook-sdk/src/indexer/mod.rs +++ b/components/chainhook-sdk/src/indexer/mod.rs @@ -109,7 +109,6 @@ impl Indexer { header: BlockHeader, ctx: &Context, ) -> Result, String> { - self.bitcoin_blocks_pool.process_header(header, ctx) } @@ -167,6 +166,31 @@ impl Indexer { pub fn get_pox_config(&mut self) -> PoxConfig { self.stacks_context.pox_config.clone() } + + #[cfg(feature = "stacks-signers")] + pub fn handle_stacks_marshalled_stackerdb_chunk( + &mut self, + 
marshalled_stackerdb_chunks: JsonValue, + receipt_time: u64, + ctx: &Context, + ) -> Result, String> { + use chainhook_types::StacksChainUpdatedWithStackerDbChunksData; + + let chunks = stacks::standardize_stacks_marshalled_stackerdb_chunks( + &self.config, + marshalled_stackerdb_chunks, + receipt_time, + &mut self.stacks_context, + ctx, + )?; + if chunks.len() > 0 { + Ok(Some(StacksChainEvent::ChainUpdatedWithStackerDbChunks( + StacksChainUpdatedWithStackerDbChunksData { chunks }, + ))) + } else { + Ok(None) + } + } } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 25b80c7d1..142e18bae 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -12,7 +12,7 @@ use clarity::vm::types::{SequenceData, Value as ClarityValue}; use hiro_system_kit::slog; use rocket::serde::json::Value as JsonValue; use rocket::serde::Deserialize; -use stacks_codec::codec::{StacksTransaction, TransactionAuth, TransactionPayload}; +use stacks_codec::codec::{NakamotoBlock, StacksTransaction, TransactionAuth, TransactionPayload}; use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryInto; use std::io::Cursor; @@ -273,6 +273,36 @@ pub struct ContractReadonlyCall { pub result: String, } +#[cfg(feature = "stacks-signers")] +#[derive(Deserialize, Debug)] +pub struct NewStackerDbChunkIssuer { + pub issuer_id: u32, + pub slots: Vec, +} + +#[cfg(feature = "stacks-signers")] +#[derive(Deserialize, Debug)] +pub struct NewStackerDbChunksContractId { + pub name: String, + pub issuer: Vec, +} + +#[cfg(feature = "stacks-signers")] +#[derive(Deserialize, Debug)] +pub struct NewSignerModifiedSlot { + pub sig: String, + pub data: String, + pub slot_id: u64, + pub version: u64, +} + +#[cfg(feature = "stacks-signers")] +#[derive(Deserialize, Debug)] +pub struct NewStackerDbChunks { + pub contract_id: 
NewStackerDbChunksContractId, + pub modified_slots: Vec, +} + pub fn standardize_stacks_serialized_block_header( serialized_block: &str, ) -> Result<(BlockIdentifier, BlockIdentifier), String> { @@ -354,8 +384,7 @@ pub fn standardize_stacks_block( } return Err(format!( "unable to standardize block #{} ({})", - block.block_height, - e + block.block_height, e )); } }; @@ -562,6 +591,200 @@ pub fn standardize_stacks_microblock_trail( Ok(microblocks) } +#[cfg(feature = "stacks-signers")] +pub fn standardize_stacks_marshalled_stackerdb_chunks( + _indexer_config: &IndexerConfig, + marshalled_stackerdb_chunks: JsonValue, + receipt_time: u64, + _chain_ctx: &mut StacksChainContext, + _ctx: &Context, +) -> Result, String> { + let mut stackerdb_chunks: NewStackerDbChunks = + serde_json::from_value(marshalled_stackerdb_chunks) + .map_err(|e| format!("unable to parse stackerdb chunks {e}"))?; + standardize_stacks_stackerdb_chunks(&mut stackerdb_chunks, receipt_time) +} + +#[cfg(feature = "stacks-signers")] +pub fn standardize_stacks_stackerdb_chunks( + stackerdb_chunks: &NewStackerDbChunks, + receipt_time: u64, +) -> Result, String> { + use stacks_codec::codec::BlockResponse; + use stacks_codec::codec::RejectCode; + use stacks_codec::codec::SignerMessage; + use stacks_codec::codec::ValidateRejectCode; + + let contract_id = &stackerdb_chunks.contract_id.name; + let mut parsed_chunks: Vec = vec![]; + for slot in stackerdb_chunks.modified_slots.iter() { + let data_bytes = match hex::decode(&slot.data) { + Ok(bytes) => bytes, + Err(e) => return Err(format!("unable to decode signer slot hex data: {e}")), + }; + let signer_message = + match SignerMessage::consensus_deserialize(&mut Cursor::new(&data_bytes)) { + Ok(message) => message, + Err(e) => return Err(format!("unable to deserialize SignerMessage: {e}")), + }; + let message = match signer_message { + SignerMessage::BlockProposal(block_proposal) => { + StacksSignerMessage::BlockProposal(BlockProposalData { + block: 
standardize_stacks_nakamoto_block(&block_proposal.block), + burn_height: block_proposal.burn_height, + reward_cycle: block_proposal.reward_cycle, + }) + } + SignerMessage::BlockResponse(block_response) => match block_response { + BlockResponse::Accepted((block_hash, sig)) => StacksSignerMessage::BlockResponse( + BlockResponseData::Accepted(BlockAcceptedResponse { + signer_signature_hash: block_hash.to_hex(), + sig: sig.to_hex(), + }), + ), + BlockResponse::Rejected(block_rejection) => StacksSignerMessage::BlockResponse( + BlockResponseData::Rejected(BlockRejectedResponse { + reason: block_rejection.reason, + reason_code: match block_rejection.reason_code { + RejectCode::ValidationFailed(validate_reject_code) => { + BlockRejectReasonCode::ValidationFailed( + match validate_reject_code { + ValidateRejectCode::BadBlockHash => { + BlockValidationFailedCode::BadBlockHash + } + ValidateRejectCode::BadTransaction => { + BlockValidationFailedCode::BadTransaction + } + ValidateRejectCode::InvalidBlock => { + BlockValidationFailedCode::InvalidBlock + } + ValidateRejectCode::ChainstateError => { + BlockValidationFailedCode::ChainstateError + } + ValidateRejectCode::UnknownParent => { + BlockValidationFailedCode::UnknownParent + } + ValidateRejectCode::NonCanonicalTenure => { + BlockValidationFailedCode::NonCanonicalTenure + } + ValidateRejectCode::NoSuchTenure => { + BlockValidationFailedCode::NoSuchTenure + } + }, + ) + } + RejectCode::NoSortitionView => BlockRejectReasonCode::NoSortitionView, + RejectCode::ConnectivityIssues => { + BlockRejectReasonCode::ConnectivityIssues + } + RejectCode::RejectedInPriorRound => { + BlockRejectReasonCode::RejectedInPriorRound + } + RejectCode::SortitionViewMismatch => { + BlockRejectReasonCode::SortitionViewMismatch + } + RejectCode::TestingDirective => BlockRejectReasonCode::TestingDirective, + }, + signer_signature_hash: block_rejection.signer_signature_hash.to_hex(), + chain_id: block_rejection.chain_id, + signature: 
block_rejection.signature.to_hex(), + }), + ), + }, + SignerMessage::BlockPushed(nakamoto_block) => { + StacksSignerMessage::BlockPushed(BlockPushedData { + block: standardize_stacks_nakamoto_block(&nakamoto_block), + }) + } + SignerMessage::MockSignature(_) => StacksSignerMessage::MockSignature, + SignerMessage::MockProposal(_) => StacksSignerMessage::MockProposal, + SignerMessage::MockBlock(_) => StacksSignerMessage::MockBlock, + }; + parsed_chunks.push(StacksStackerDbChunk { + contract: contract_id.clone(), + sig: slot.sig.clone(), + pubkey: get_signer_pubkey_from_stackerdb_chunk_slot(slot, &data_bytes)?, + message, + receipt_time, + }); + } + + Ok(parsed_chunks) +} + +#[cfg(feature = "stacks-signers")] +pub fn standardize_stacks_nakamoto_block(block: &NakamotoBlock) -> NakamotoBlockData { + use miniscript::bitcoin::hex::Case; + use miniscript::bitcoin::hex::DisplayHex; + + NakamotoBlockData { + header: NakamotoBlockHeaderData { + version: block.header.version, + chain_length: block.header.chain_length, + burn_spent: block.header.burn_spent, + consensus_hash: block.header.consensus_hash.to_hex(), + parent_block_id: block.header.parent_block_id.to_hex(), + tx_merkle_root: block.header.tx_merkle_root.to_hex(), + state_index_root: block.header.state_index_root.to_hex(), + timestamp: block.header.timestamp, + miner_signature: block.header.miner_signature.to_hex(), + signer_signature: block + .header + .signer_signature + .iter() + .map(|s| s.to_hex()) + .collect(), + pox_treatment: block + .header + .pox_treatment + .serialize_to_vec() + .to_hex_string(Case::Lower), + }, + // TODO(rafaelcr): Parse and return transactions. 
+ transactions: vec![], + } +} + +#[cfg(feature = "stacks-signers")] +pub fn get_signer_pubkey_from_stackerdb_chunk_slot( + slot: &NewSignerModifiedSlot, + data_bytes: &Vec, +) -> Result { + use clarity::util::hash::Sha512Trunc256Sum; + use miniscript::bitcoin::{ + key::Secp256k1, + secp256k1::{ + ecdsa::{RecoverableSignature, RecoveryId}, + Message, + }, + }; + + let mut digest_bytes = slot.slot_id.to_be_bytes().to_vec(); + digest_bytes.extend(slot.version.to_be_bytes().to_vec()); + let data_bytes_hashed = Sha512Trunc256Sum::from_data(&data_bytes).to_bytes(); + digest_bytes.extend(data_bytes_hashed); + let digest = Sha512Trunc256Sum::from_data(&digest_bytes).to_bytes(); + + let sig_bytes = + hex::decode(&slot.sig).map_err(|e| format!("unable to decode signer slot sig: {e}"))?; + let (first, sig) = sig_bytes.split_at(1); + let rec_id = first[0]; + + let secp = Secp256k1::new(); + let recovery_id = + RecoveryId::from_i32(rec_id as i32).map_err(|e| format!("invalid recovery id: {e}"))?; + let signature = RecoverableSignature::from_compact(&sig, recovery_id) + .map_err(|e| format!("invalid signature: {e}"))?; + let message = + Message::from_digest_slice(&digest).map_err(|e| format!("invalid digest message: {e}"))?; + + let pubkey = secp + .recover_ecdsa(&message, &signature) + .map_err(|e| format!("unable to recover signer pubkey: {e}"))?; + + Ok(hex::encode(pubkey.serialize())) +} + pub fn get_value_description(raw_value: &str, ctx: &Context) -> String { let raw_value = match raw_value.strip_prefix("0x") { Some(raw_value) => raw_value, @@ -572,7 +795,6 @@ pub fn get_value_description(raw_value: &str, ctx: &Context) -> String { _ => return raw_value.to_string(), }; - match ClarityValue::consensus_deserialize(&mut Cursor::new(&value_bytes)) { Ok(value) => format!("{}", value), Err(e) => { @@ -646,12 +868,12 @@ pub fn get_tx_description( if let ClarityValue::Tuple(outter) = *data.data { if let Some(ClarityValue::Tuple(inner)) = outter.data_map.get("data") { if let ( - 
Some(ClarityValue::Principal(stacking_address)), - Some(ClarityValue::UInt(amount_ustx)), - Some(ClarityValue::Principal(delegate)), - Some(ClarityValue::Optional(pox_addr)), - Some(ClarityValue::Optional(unlock_burn_height)), - ) = ( + Some(ClarityValue::Principal(stacking_address)), + Some(ClarityValue::UInt(amount_ustx)), + Some(ClarityValue::Principal(delegate)), + Some(ClarityValue::Optional(pox_addr)), + Some(ClarityValue::Optional(unlock_burn_height)), + ) = ( &outter.data_map.get("stacker"), &inner.data_map.get("amount-ustx"), &inner.data_map.get("delegate-to"), @@ -671,17 +893,13 @@ pub fn get_tx_description( Some(value) => match &**value { ClarityValue::Tuple(address_comps) => { match ( - &address_comps - .data_map - .get("version"), + &address_comps.data_map.get("version"), &address_comps .data_map .get("hashbytes"), ) { ( - Some(ClarityValue::UInt( - _version, - )), + Some(ClarityValue::UInt(_version)), Some(ClarityValue::Sequence( SequenceData::Buffer( _hashbytes, @@ -706,14 +924,7 @@ pub fn get_tx_description( }, }), ); - return Ok(( - description, - tx_type, - 0, - 0, - "".to_string(), - None, - )); + return Ok((description, tx_type, 0, 0, "".to_string(), None)); } } } diff --git a/components/chainhook-sdk/src/indexer/stacks/tests.rs b/components/chainhook-sdk/src/indexer/stacks/tests.rs index 51d4d2a5c..e45be1648 100644 --- a/components/chainhook-sdk/src/indexer/stacks/tests.rs +++ b/components/chainhook-sdk/src/indexer/stacks/tests.rs @@ -398,3 +398,43 @@ fn into_chainhook_event_rejects_invalid_missing_event() { .into_chainhook_event() .expect_err("expected error on missing event"); } + +#[test] +#[cfg(feature = "stacks-signers")] +fn stackerdb_chunks_covert_into_signer_messages() { + use chainhook_types::{BlockResponseData, StacksSignerMessage}; + + use crate::indexer::tests::helpers::stacks_events::create_new_stackerdb_chunk; + + use super::standardize_stacks_stackerdb_chunks; + + let new_chunks = create_new_stackerdb_chunk( + 
"signers-1-1".to_string(), + "01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc".to_string(), + "010074aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b".to_string() + ); + let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks, 1729013425).unwrap(); + + assert_eq!(parsed_chunk.len(), 1); + let message = &parsed_chunk[0]; + assert_eq!(message.contract, "signers-1-1"); + assert_eq!( + message.pubkey, + "03c76290f48909b4d49e111d69236a138ce96df3e05f709e425153d99f4fe671b4" + ); + assert_eq!(message.sig, "01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc"); + + match &message.message { + StacksSignerMessage::BlockResponse(block_response_data) => match block_response_data { + BlockResponseData::Accepted(block_accepted_response) => { + assert_eq!(block_accepted_response.sig, "005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b"); + assert_eq!( + block_accepted_response.signer_signature_hash, + "74aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986" + ); + } + _ => assert!(false), + }, + _ => assert!(false), + } +} diff --git a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs index 4ca977bc5..e944b986f 100644 --- a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs +++ b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs @@ -119,3 +119,29 @@ pub fn create_new_event_from_stacks_event(event: StacksTransactionEventPayload) contract_event, } } + +#[cfg(feature = "stacks-signers")] +pub fn create_new_stackerdb_chunk( + contract_name: String, + slot_sig: 
String, + slot_data: String, +) -> crate::indexer::stacks::NewStackerDbChunks { + use crate::indexer::stacks::{ + NewSignerModifiedSlot, NewStackerDbChunkIssuer, NewStackerDbChunksContractId, + }; + crate::indexer::stacks::NewStackerDbChunks { + contract_id: NewStackerDbChunksContractId { + name: contract_name, + issuer: vec![NewStackerDbChunkIssuer { + issuer_id: 26, + slots: vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }], + }, + modified_slots: vec![NewSignerModifiedSlot { + sig: slot_sig, + data: slot_data, + slot_id: 1, + version: 141, + }], + } +} diff --git a/components/chainhook-sdk/src/observer/http.rs b/components/chainhook-sdk/src/observer/http.rs index 024874142..7468fa4a1 100644 --- a/components/chainhook-sdk/src/observer/http.rs +++ b/components/chainhook-sdk/src/observer/http.rs @@ -178,6 +178,50 @@ pub fn handle_new_stacks_block( success_response() } +#[cfg(feature = "stacks-signers")] +#[post("/stackerdb_chunks", format = "application/json", data = "")] +pub fn handle_stackerdb_chunks( + indexer_rw_lock: &State>>, + payload: Json, + background_job_tx: &State>>>, + ctx: &State, +) -> Result, Custom>> { + use std::time::{SystemTime, UNIX_EPOCH}; + + try_info!(ctx, "POST /stackerdb_chunks"); + + // Standardize the structure of the StackerDB chunk, and identify the kind of update that this new message would imply. 
+ let Ok(epoch) = SystemTime::now().duration_since(UNIX_EPOCH) else { + return error_response("Unable to get system receipt_time".to_string(), ctx); + }; + let chain_event = match indexer_rw_lock.inner().write() { + Ok(mut indexer) => indexer + .handle_stacks_marshalled_stackerdb_chunk(payload.into_inner(), epoch.as_secs(), ctx), + Err(e) => { + return error_response(format!("Unable to acquire background_job_tx: {e}"), ctx); + } + }; + + match chain_event { + Ok(Some(chain_event)) => { + if let Err(e) = background_job_tx.lock().map(|tx| { + tx.send(ObserverCommand::PropagateStacksChainEvent(chain_event)) + .map_err(|e| format!("Unable to send stacks chain event: {}", e)) + }) { + return error_response(format!("unable to acquire background_job_tx: {e}"), ctx); + } + } + Ok(None) => { + try_info!(ctx, "No chain event was generated"); + } + Err(e) => { + return error_response(format!("Chain event error: {e}"), ctx); + } + } + + success_response() +} + #[post( "/new_microblocks", format = "application/json", @@ -249,7 +293,7 @@ pub fn handle_new_mempool_tx( tx.send(ObserverCommand::PropagateStacksMempoolEvent( StacksChainMempoolEvent::TransactionsAdmitted(transactions), )) - .map_err(|e| format!("Unable to send stacks chain event: {}", e)) + .map_err(|e| format!("Unable to send stacks chain event: {}", e)) }) { return error_response(format!("unable to acquire background_job_tx: {e}"), ctx); } diff --git a/components/chainhook-sdk/src/observer/mod.rs b/components/chainhook-sdk/src/observer/mod.rs index 776e2e781..0e9a15cd6 100644 --- a/components/chainhook-sdk/src/observer/mod.rs +++ b/components/chainhook-sdk/src/observer/mod.rs @@ -1026,7 +1026,10 @@ pub async fn start_stacks_event_observer( http::handle_mined_block, http::handle_mined_microblock, ]; - + #[cfg(feature = "stacks-signers")] + { + routes.append(&mut routes![http::handle_stackerdb_chunks]); + } if bitcoin_rpc_proxy_enabled { routes.append(&mut routes![http::handle_bitcoin_rpc_call]); routes.append(&mut 
routes![http::handle_bitcoin_wallet_rpc_call]); diff --git a/components/chainhook-types-rs/src/lib.rs b/components/chainhook-types-rs/src/lib.rs index b015b6c75..a0251a883 100644 --- a/components/chainhook-types-rs/src/lib.rs +++ b/components/chainhook-types-rs/src/lib.rs @@ -9,12 +9,14 @@ mod events; mod ordinals; mod processors; mod rosetta; +mod signers; pub use contract_interface::*; pub use events::*; pub use ordinals::*; pub use processors::*; pub use rosetta::*; +pub use signers::*; pub const DEFAULT_STACKS_NODE_RPC: &str = "http://localhost:20443"; diff --git a/components/chainhook-types-rs/src/rosetta.rs b/components/chainhook-types-rs/src/rosetta.rs index 9ebbb18e6..804f828aa 100644 --- a/components/chainhook-types-rs/src/rosetta.rs +++ b/components/chainhook-types-rs/src/rosetta.rs @@ -1,7 +1,7 @@ use super::bitcoin::{TxIn, TxOut}; use crate::contract_interface::ContractInterface; use crate::ordinals::OrdinalOperation; -use crate::{events::*, Brc20Operation, DEFAULT_STACKS_NODE_RPC}; +use crate::{events::*, Brc20Operation, StacksStackerDbChunk, DEFAULT_STACKS_NODE_RPC}; use schemars::JsonSchema; use std::cmp::Ordering; use std::collections::HashSet; @@ -667,6 +667,11 @@ pub struct BitcoinChainUpdatedWithReorgData { pub confirmed_blocks: Vec, } +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct StacksChainUpdatedWithStackerDbChunksData { + pub chunks: Vec, +} + #[allow(dead_code)] #[derive(Debug, Clone, PartialEq, Serialize)] pub enum StacksChainEvent { @@ -674,6 +679,7 @@ pub enum StacksChainEvent { ChainUpdatedWithReorg(StacksChainUpdatedWithReorgData), ChainUpdatedWithMicroblocks(StacksChainUpdatedWithMicroblocksData), ChainUpdatedWithMicroblocksReorg(StacksChainUpdatedWithMicroblocksReorgData), + ChainUpdatedWithStackerDbChunks(StacksChainUpdatedWithStackerDbChunksData), } impl StacksChainEvent { @@ -703,6 +709,7 @@ impl StacksChainEvent { .microblocks_to_apply .first() .and_then(|b| Some(&b.metadata.anchor_block_identifier)), + 
StacksChainEvent::ChainUpdatedWithStackerDbChunks(_) => None, } } } diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs new file mode 100644 index 000000000..c1fd4e873 --- /dev/null +++ b/components/chainhook-types-rs/src/signers.rs @@ -0,0 +1,96 @@ +use crate::StacksTransactionData; + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct NakamotoBlockHeaderData { + pub version: u8, + pub chain_length: u64, + pub burn_spent: u64, + pub consensus_hash: String, + pub parent_block_id: String, + pub tx_merkle_root: String, + pub state_index_root: String, + pub timestamp: u64, + pub miner_signature: String, + pub signer_signature: Vec, + pub pox_treatment: String, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct NakamotoBlockData { + pub header: NakamotoBlockHeaderData, + pub transactions: Vec, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct BlockProposalData { + // TODO(rafaelcr): Include `block_hash` and `index_block_hash`. 
+ pub block: NakamotoBlockData, + pub burn_height: u64, + pub reward_cycle: u64, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct BlockAcceptedResponse { + pub signer_signature_hash: String, + pub sig: String, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum BlockValidationFailedCode { + BadBlockHash = 0, + BadTransaction = 1, + InvalidBlock = 2, + ChainstateError = 3, + UnknownParent = 4, + NonCanonicalTenure = 5, + NoSuchTenure = 6, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum BlockRejectReasonCode { + ValidationFailed(BlockValidationFailedCode), + ConnectivityIssues, + RejectedInPriorRound, + NoSortitionView, + SortitionViewMismatch, + TestingDirective, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct BlockRejectedResponse { + pub reason: String, + pub reason_code: BlockRejectReasonCode, + pub signer_signature_hash: String, + pub chain_id: u32, + pub signature: String, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum BlockResponseData { + Accepted(BlockAcceptedResponse), + Rejected(BlockRejectedResponse), +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct BlockPushedData { + pub block: NakamotoBlockData, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub enum StacksSignerMessage { + BlockProposal(BlockProposalData), + BlockResponse(BlockResponseData), + BlockPushed(BlockPushedData), + MockProposal, + MockSignature, + MockBlock, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct StacksStackerDbChunk { + pub contract: String, + pub sig: String, + pub pubkey: String, + pub message: StacksSignerMessage, + pub receipt_time: u64, +} From ff66bb2eab771c16fead154607a280664eb6be4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Tue, 22 Oct 2024 10:13:27 -0600 Subject: [PATCH 03/25] feat(ts-client): add signer messages to stacks payloads 
(#657) Expands the TS client to include new signer messages, and adds missing `signer_bitvec` and `signer_signatures` fields to block metadata. --- .../chainhook-sdk/src/indexer/stacks/mod.rs | 8 +- components/chainhook-types-rs/src/signers.rs | 21 ++-- components/client/typescript/src/index.ts | 1 + .../typescript/src/schemas/stacks/payload.ts | 7 +- .../typescript/src/schemas/stacks/signers.ts | 104 ++++++++++++++++++ 5 files changed, 127 insertions(+), 14 deletions(-) create mode 100644 components/client/typescript/src/schemas/stacks/signers.ts diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index dd2dd9dbc..c935adbe4 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -749,9 +749,11 @@ pub fn standardize_stacks_stackerdb_chunks( block: standardize_stacks_nakamoto_block(&nakamoto_block), }) } - SignerMessage::MockSignature(_) => StacksSignerMessage::MockSignature, - SignerMessage::MockProposal(_) => StacksSignerMessage::MockProposal, - SignerMessage::MockBlock(_) => StacksSignerMessage::MockBlock, + SignerMessage::MockSignature(_) + | SignerMessage::MockProposal(_) + | SignerMessage::MockBlock(_) => { + continue; + } }; parsed_chunks.push(StacksStackerDbChunk { contract: contract_id.clone(), diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs index c1fd4e873..96d1f3ea7 100644 --- a/components/chainhook-types-rs/src/signers.rs +++ b/components/chainhook-types-rs/src/signers.rs @@ -37,16 +37,17 @@ pub struct BlockAcceptedResponse { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub enum BlockValidationFailedCode { - BadBlockHash = 0, - BadTransaction = 1, - InvalidBlock = 2, - ChainstateError = 3, - UnknownParent = 4, - NonCanonicalTenure = 5, - NoSuchTenure = 6, + BadBlockHash, + BadTransaction, + InvalidBlock, + ChainstateError, + UnknownParent, + 
NonCanonicalTenure, + NoSuchTenure, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum BlockRejectReasonCode { ValidationFailed(BlockValidationFailedCode), ConnectivityIssues, @@ -66,6 +67,7 @@ pub struct BlockRejectedResponse { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(tag = "type", content = "data")] pub enum BlockResponseData { Accepted(BlockAcceptedResponse), Rejected(BlockRejectedResponse), @@ -77,13 +79,12 @@ pub struct BlockPushedData { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(tag = "type", content = "data")] pub enum StacksSignerMessage { BlockProposal(BlockProposalData), BlockResponse(BlockResponseData), BlockPushed(BlockPushedData), - MockProposal, - MockSignature, - MockBlock, + // TODO(rafaelcr): Add mock messages } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] diff --git a/components/client/typescript/src/index.ts b/components/client/typescript/src/index.ts index 8cbaab0bb..da193fc8e 100644 --- a/components/client/typescript/src/index.ts +++ b/components/client/typescript/src/index.ts @@ -152,6 +152,7 @@ export * from './schemas/payload'; export * from './schemas/predicate'; export * from './schemas/stacks/if_this'; export * from './schemas/stacks/payload'; +export * from './schemas/stacks/signers'; export * from './schemas/stacks/tx_events'; export * from './schemas/stacks/tx_kind'; export * from './server'; diff --git a/components/client/typescript/src/schemas/stacks/payload.ts b/components/client/typescript/src/schemas/stacks/payload.ts index cc1653e1e..ef5969eb3 100644 --- a/components/client/typescript/src/schemas/stacks/payload.ts +++ b/components/client/typescript/src/schemas/stacks/payload.ts @@ -8,6 +8,7 @@ import { import { StacksTransactionEventSchema } from './tx_events'; import { StacksTransactionKindSchema } from './tx_kind'; import { StacksIfThisSchema } from './if_this'; +import { 
StacksSignerMessageEventSchema } from './signers'; export const StacksExecutionCostSchema = Type.Optional( Type.Object({ @@ -52,7 +53,7 @@ export const StacksTransactionMetadataSchema = Type.Object({ }); export type StacksTransactionMetadata = Static; -const StacksTransactionSchema = Type.Object({ +export const StacksTransactionSchema = Type.Object({ transaction_identifier: TransactionIdentifierSchema, operations: Type.Array(RosettaOperationSchema), metadata: StacksTransactionMetadataSchema, @@ -101,9 +102,13 @@ export const StacksEventSchema = Type.Object({ }); export type StacksEvent = Static; +export const StacksNonConsensusEventSchema = Type.Union([StacksSignerMessageEventSchema]); +export type StacksNonConsensusEvent = Static; + export const StacksPayloadSchema = Type.Object({ apply: Type.Array(StacksEventSchema), rollback: Type.Array(StacksEventSchema), + events: Type.Array(StacksNonConsensusEventSchema), chainhook: Type.Object({ uuid: Type.String(), predicate: StacksIfThisSchema, diff --git a/components/client/typescript/src/schemas/stacks/signers.ts b/components/client/typescript/src/schemas/stacks/signers.ts new file mode 100644 index 000000000..701486901 --- /dev/null +++ b/components/client/typescript/src/schemas/stacks/signers.ts @@ -0,0 +1,104 @@ +import { Static, Type } from '@fastify/type-provider-typebox'; +import { BlockIdentifierSchema } from '../common'; +import { StacksTransactionSchema } from './payload'; + +export const StacksNakamotoBlockHeaderSchema = Type.Object({ + version: Type.Integer(), + chain_length: Type.Integer(), + burn_spent: Type.Integer(), + consensus_hash: Type.String(), + parent_block_id: Type.String(), + tx_merkle_root: Type.String(), + state_index_root: Type.String(), + timestamp: Type.Integer(), + miner_signature: Type.String(), + signer_signature: Type.Array(Type.String()), + pox_treatment: Type.String(), +}); +export type StacksNakamotoBlockHeader = Static; + +export const StacksNakamotoBlockSchema = Type.Object({ + 
header: StacksNakamotoBlockHeaderSchema, + transactions: Type.Array(StacksTransactionSchema), +}); +export type StacksNakamotoBlock = Static; + +export const StacksSignerMessageBlockProposalSchema = Type.Object({ + type: Type.Literal('BlockProposal'), + data: Type.Object({ + block: StacksNakamotoBlockSchema, + burn_height: Type.Integer(), + reward_cycle: Type.Integer(), + }), +}); +export type StacksSignerMessageBlockProposal = Static< + typeof StacksSignerMessageBlockProposalSchema +>; + +export const StacksSignerMessageBlockResponseAcceptedSchema = Type.Object({ + type: Type.Literal('Accepted'), + data: Type.Object({ + signer_signature_hash: Type.String(), + sig: Type.String(), + }), +}); +export type StacksSignerMessageBlockResponseAccepted = Static< + typeof StacksSignerMessageBlockResponseAcceptedSchema +>; + +export const StacksSignerMessageBlockResponseRejectedSchema = Type.Object({ + type: Type.Literal('Rejected'), + data: Type.Object({ + reason: Type.String(), + reason_code: Type.Union([ + Type.Literal('VALIDATION_FAILED'), + Type.Literal('CONNECTIVITY_ISSUES'), + Type.Literal('REJECTED_IN_PRIOR_ROUND'), + Type.Literal('NO_SORTITION_VIEW'), + Type.Literal('SORTITION_VIEW_MISMATCH'), + Type.Literal('TESTING_DIRECTIVE'), + ]), + signer_signature_hash: Type.String(), + chain_id: Type.Integer(), + signature: Type.String(), + }), +}); +export type StacksSignerMessageBlockResponseRejected = Static< + typeof StacksSignerMessageBlockResponseRejectedSchema +>; + +export const StacksSignerMessageBlockResponseSchema = Type.Object({ + type: Type.Literal('BlockResponse'), + data: Type.Union([ + StacksSignerMessageBlockResponseAcceptedSchema, + StacksSignerMessageBlockResponseRejectedSchema, + ]), +}); +export type StacksSignerMessageBlockResponse = Static< + typeof StacksSignerMessageBlockResponseSchema +>; + +export const StacksSignerMessageBlockPushedSchema = Type.Object({ + type: Type.Literal('BlockPushed'), + data: Type.Object({ + block: StacksNakamotoBlockSchema, 
+ }), +}); +export type StacksSignerMessageBlockPushed = Static; + +export const StacksSignerMessageSchema = Type.Union([ + StacksSignerMessageBlockProposalSchema, + StacksSignerMessageBlockResponseSchema, + StacksSignerMessageBlockPushedSchema, +]); +export type StacksSignerMessage = Static; + +export const StacksSignerMessageEventSchema = Type.Object({ + contract: Type.String(), + sig: Type.String(), + pubkey: Type.String(), + message: StacksSignerMessageSchema, + received_at: Type.Integer(), + received_at_block: BlockIdentifierSchema, +}); +export type StacksSignerMessageEvent = Static; From aee14bc693573f403f8a6d8eafe7b30d2ca76b54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 23 Oct 2024 15:46:33 -0600 Subject: [PATCH 04/25] feat: include signer messages in Stacks predicate payloads (#656) --- Cargo.lock | 5 +- Cargo.toml | 2 +- components/chainhook-cli/src/scan/stacks.rs | 4 + components/chainhook-cli/src/service/mod.rs | 7 +- .../src/chainhooks/stacks/mod.rs | 144 +++++++++++++++--- .../fixtures/stacks/testnet/occurrence.json | 3 +- .../chainhook-sdk/src/chainhooks/tests/mod.rs | 2 + components/chainhook-sdk/src/indexer/mod.rs | 27 +++- .../src/indexer/stacks/blocks_pool.rs | 9 ++ .../chainhook-sdk/src/indexer/stacks/mod.rs | 120 ++++++++++----- .../chainhook-sdk/src/indexer/stacks/tests.rs | 10 +- .../indexer/tests/helpers/stacks_events.rs | 15 +- components/chainhook-sdk/src/observer/http.rs | 4 +- components/chainhook-sdk/src/observer/mod.rs | 17 ++- components/chainhook-sdk/src/utils/mod.rs | 3 +- components/chainhook-types-rs/src/rosetta.rs | 21 ++- components/chainhook-types-rs/src/signers.rs | 4 +- .../client/typescript/package-lock.json | 4 +- components/client/typescript/package.json | 2 +- .../typescript/src/schemas/stacks/if_this.ts | 22 +++ .../typescript/src/schemas/stacks/payload.ts | 6 +- .../typescript/src/schemas/stacks/signers.ts | 20 +-- docs/chainhook-openapi.json | 42 +++++ 23 files changed, 381 
insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e5a2a023..23d5c23e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3549,12 +3549,11 @@ dependencies = [ [[package]] name = "stacks-codec" -version = "2.7.0" -source = "git+https://github.com/hirosystems/clarinet.git?rev=3a2f9136abd85b265e538fbe51c808e9c09a06cb#3a2f9136abd85b265e538fbe51c808e9c09a06cb" +version = "2.9.0" +source = "git+https://github.com/hirosystems/clarinet.git?rev=b0683675115562d719ed4b5245f620e0990030a0#b0683675115562d719ed4b5245f620e0990030a0" dependencies = [ "clarity", "serde", - "wsts", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2111714ed..e7694ce18 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ default-members = ["components/chainhook-cli", "components/chainhook-sdk"] resolver = "2" [patch.crates-io] -stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "3a2f9136abd85b265e538fbe51c808e9c09a06cb" } +stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "b0683675115562d719ed4b5245f620e0990030a0" } diff --git a/components/chainhook-cli/src/scan/stacks.rs b/components/chainhook-cli/src/scan/stacks.rs index 1e3b100e5..9e021513e 100644 --- a/components/chainhook-cli/src/scan/stacks.rs +++ b/components/chainhook-cli/src/scan/stacks.rs @@ -339,6 +339,8 @@ pub async fn scan_stacks_chainstate_via_rocksdb_using_predicate( chainhook: predicate_spec, apply: hits_per_blocks, rollback: vec![], + // TODO(rafaelcr): Query for non consensus events which fall between block timestamps to fill in here + events: vec![] }; let res = match handle_stacks_hook_action( trigger, @@ -533,6 +535,8 @@ pub async fn scan_stacks_chainstate_via_csv_using_predicate( chainhook: predicate_spec, apply: hits_per_blocks, rollback: vec![], + // TODO(rafaelcr): Consider StackerDB chunks that come from TSVs. 
+ events: vec![] }; match handle_stacks_hook_action(trigger, &proofs, &config.get_event_observer_config(), ctx) { diff --git a/components/chainhook-cli/src/service/mod.rs b/components/chainhook-cli/src/service/mod.rs index a6249a4c5..7e50be6d3 100644 --- a/components/chainhook-cli/src/service/mod.rs +++ b/components/chainhook-cli/src/service/mod.rs @@ -548,8 +548,9 @@ impl Service { } StacksChainEvent::ChainUpdatedWithMicroblocks(_) | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, - StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { + StacksChainEvent::ChainUpdatedWithNonConsensusEvents(data) => { // TODO(rafaelcr): Store signer data. + println!("signer message: {:?}", data); } }, Err(e) => { @@ -619,8 +620,8 @@ impl Service { } StacksChainEvent::ChainUpdatedWithMicroblocks(_) | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, - StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { - // TODO(rafaelcr): Send via HTTP payload. + StacksChainEvent::ChainUpdatedWithNonConsensusEvents(_) => { + // TODO(rafaelcr): Expire signer message predicates when appropriate }, }; update_status_from_report( diff --git a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs index 66f2205c4..1eb2c7fbf 100644 --- a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs +++ b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs @@ -1,15 +1,15 @@ use crate::observer::EventObserverConfig; use crate::utils::{AbstractStacksBlock, Context, MAX_BLOCK_HEIGHTS_ENTRIES}; +use super::types::validate_txid; use super::types::{ append_error_context, BlockIdentifierIndexRule, ChainhookInstance, ExactMatchingRule, HookAction, }; -use super::types::validate_txid; use chainhook_types::{ - BlockIdentifier, StacksChainEvent, StacksNetwork, StacksTransactionData, - StacksTransactionEvent, StacksTransactionEventPayload, StacksTransactionKind, - TransactionIdentifier, + BlockIdentifier, StacksChainEvent, 
StacksNetwork, StacksNonConsensusEventData, + StacksTransactionData, StacksTransactionEvent, StacksTransactionEventPayload, + StacksTransactionKind, TransactionIdentifier, }; use clarity::codec::StacksMessageCodec; use clarity::vm::types::{ @@ -259,6 +259,8 @@ pub enum StacksPredicate { NftEvent(StacksNftEventBasedPredicate), StxEvent(StacksStxEventBasedPredicate), Txid(ExactMatchingRule), + #[cfg(feature = "stacks-signers")] + SignerMessage(StacksSignerMessagePredicate), } impl StacksPredicate { @@ -307,11 +309,28 @@ impl StacksPredicate { )); } } + #[cfg(feature = "stacks-signers")] + StacksPredicate::SignerMessage(StacksSignerMessagePredicate::FromSignerPubKey(_)) => { + // TODO(rafaelcr): Validate pubkey format + } + #[cfg(feature = "stacks-signers")] + StacksPredicate::SignerMessage(StacksSignerMessagePredicate::AfterTimestamp(_)) => {} } Ok(()) } } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum StacksSignerMessagePredicate { + AfterTimestamp(u64), + FromSignerPubKey(String), +} + +impl StacksSignerMessagePredicate { + // TODO(rafaelcr): Write validators +} + #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct StacksContractCallBasedPredicate { @@ -457,6 +476,7 @@ pub struct StacksTriggerChainhook<'a> { pub chainhook: &'a StacksChainhookInstance, pub apply: Vec<(Vec<&'a StacksTransactionData>, &'a dyn AbstractStacksBlock)>, pub rollback: Vec<(Vec<&'a StacksTransactionData>, &'a dyn AbstractStacksBlock)>, + pub events: Vec<&'a StacksNonConsensusEventData>, } #[derive(Clone, Debug)] @@ -480,21 +500,18 @@ pub struct StacksChainhookPayload { pub struct StacksChainhookOccurrencePayload { pub apply: Vec, pub rollback: Vec, + pub events: Vec, pub chainhook: StacksChainhookPayload, } impl StacksChainhookOccurrencePayload { - pub fn from_trigger( - trigger: StacksTriggerChainhook<'_>, - ) -> StacksChainhookOccurrencePayload { + pub 
fn from_trigger(trigger: StacksTriggerChainhook<'_>) -> StacksChainhookOccurrencePayload { StacksChainhookOccurrencePayload { apply: trigger .apply .into_iter() .map(|(transactions, block)| { - let transactions = transactions - .into_iter().cloned() - .collect::>(); + let transactions = transactions.into_iter().cloned().collect::>(); StacksApplyTransactionPayload { block_identifier: block.get_identifier().clone(), transactions, @@ -505,9 +522,7 @@ impl StacksChainhookOccurrencePayload { .rollback .into_iter() .map(|(transactions, block)| { - let transactions = transactions - .into_iter().cloned() - .collect::>(); + let transactions = transactions.into_iter().cloned().collect::>(); StacksRollbackTransactionPayload { block_identifier: block.get_identifier().clone(), transactions, @@ -517,6 +532,7 @@ impl StacksChainhookOccurrencePayload { chainhook: StacksChainhookPayload { uuid: trigger.chainhook.uuid.clone(), }, + events: trigger.events.into_iter().cloned().collect::>(), } } } @@ -593,6 +609,7 @@ pub fn evaluate_stacks_chainhooks_on_chain_event<'a>( chainhook, apply, rollback, + events: vec![], }) } } @@ -621,6 +638,7 @@ pub fn evaluate_stacks_chainhooks_on_chain_event<'a>( chainhook, apply, rollback, + events: vec![], }) } } @@ -657,6 +675,7 @@ pub fn evaluate_stacks_chainhooks_on_chain_event<'a>( chainhook, apply, rollback, + events: vec![], }) } } @@ -718,13 +737,37 @@ pub fn evaluate_stacks_chainhooks_on_chain_event<'a>( chainhook, apply, rollback, + events: vec![], }) } } - }, - StacksChainEvent::ChainUpdatedWithStackerDbChunks(data) => { - // TODO: Support predicates to send this data - }, + } + #[cfg(feature = "stacks-signers")] + StacksChainEvent::ChainUpdatedWithNonConsensusEvents(data) => { + if let Some(first_event) = data.events.first() { + for chainhook in active_chainhooks.iter() { + evaluated_predicates + .insert(chainhook.uuid.as_str(), &first_event.received_at_block); + let (occurrences, mut expirations) = + 
evaluate_stacks_predicate_on_non_consensus_events( + &data.events, + chainhook, + ctx, + ); + expired_predicates.append(&mut expirations); + if occurrences.len() > 0 { + triggered_predicates.push(StacksTriggerChainhook { + chainhook, + apply: vec![], + rollback: vec![], + events: occurrences, + }); + } + } + } + } + #[cfg(not(feature = "stacks-signers"))] + StacksChainEvent::ChainUpdatedWithNonConsensusEvents(_) => {} } ( triggered_predicates, @@ -795,7 +838,45 @@ pub fn evaluate_stacks_predicate_on_block<'a>( | StacksPredicate::StxEvent(_) | StacksPredicate::PrintEvent(_) | StacksPredicate::Txid(_) => unreachable!(), + #[cfg(feature = "stacks-signers")] + StacksPredicate::SignerMessage(_) => false, + } +} + +#[cfg(feature = "stacks-signers")] +pub fn evaluate_stacks_predicate_on_non_consensus_events<'a>( + events: &'a Vec, + chainhook: &'a StacksChainhookInstance, + _ctx: &Context, +) -> ( + Vec<&'a StacksNonConsensusEventData>, + BTreeMap<&'a str, &'a BlockIdentifier>, +) { + let mut occurrences = vec![]; + let expired_predicates = BTreeMap::new(); + for event in events { + match &chainhook.predicate { + StacksPredicate::SignerMessage(StacksSignerMessagePredicate::AfterTimestamp( + timestamp, + )) => { + if event.received_at_ms >= *timestamp { + occurrences.push(event); + } + } + StacksPredicate::SignerMessage(StacksSignerMessagePredicate::FromSignerPubKey(_)) => { + // TODO(rafaelcr): Evaluate on pubkey + } + StacksPredicate::BlockHeight(_) + | StacksPredicate::ContractDeployment(_) + | StacksPredicate::ContractCall(_) + | StacksPredicate::FtEvent(_) + | StacksPredicate::NftEvent(_) + | StacksPredicate::StxEvent(_) + | StacksPredicate::PrintEvent(_) + | StacksPredicate::Txid(_) => unreachable!(), + }; } + (occurrences, expired_predicates) } pub fn evaluate_stacks_predicate_on_transaction<'a>( @@ -819,7 +900,7 @@ pub fn evaluate_stacks_predicate_on_transaction<'a>( _ => false, }, 
StacksPredicate::ContractDeployment(StacksContractDeploymentPredicate::ImplementTrait( - stacks_trait, + _stacks_trait, )) => match &transaction.metadata.kind { StacksTransactionKind::ContractDeployment(_actual_deployment) => { ctx.try_log(|logger| { @@ -952,7 +1033,9 @@ pub fn evaluate_stacks_predicate_on_transaction<'a>( } StacksPredicate::PrintEvent(expected_event) => { for event in transaction.metadata.receipt.events.iter() { - if let StacksTransactionEventPayload::SmartContractEvent(actual) = &event.event_payload { + if let StacksTransactionEventPayload::SmartContractEvent(actual) = + &event.event_payload + { if actual.topic == "print" { match expected_event { StacksPrintEventBasedPredicate::Contains { @@ -1006,9 +1089,29 @@ pub fn evaluate_stacks_predicate_on_transaction<'a>( txid.eq(&transaction.transaction_identifier.hash) } StacksPredicate::BlockHeight(_) => unreachable!(), + #[cfg(feature = "stacks-signers")] + StacksPredicate::SignerMessage(_) => false, } } +fn serialize_stacks_non_consensus_event( + event: &StacksNonConsensusEventData, + _ctx: &Context, +) -> serde_json::Value { + use chainhook_types::StacksNonConsensusEventPayloadData; + + let payload = match &event.payload { + StacksNonConsensusEventPayloadData::SignerMessage(chunk) => { + json!({"type": "SignerMessage", "data": chunk}) + } + }; + json!({ + "payload": payload, + "received_at": event.received_at_ms, + "received_at_block": event.received_at_block, + }) +} + fn serialize_stacks_block( block: &dyn AbstractStacksBlock, transactions: Vec<&StacksTransactionData>, @@ -1238,7 +1341,7 @@ pub fn serialized_decoded_clarity_value(hex_value: &str, ctx: &Context) -> serde Ok(bytes) => bytes, _ => return json!(hex_value.to_string()), }; - + match ClarityValue::consensus_deserialize(&mut Cursor::new(&value_bytes)) { Ok(value) => serialize_to_json(&value), Err(e) => { @@ -1319,6 +1422,7 @@ pub fn serialize_stacks_payload_to_json<'a>( "rollback": trigger.rollback.into_iter().map(|(transactions, block)| 
{ serialize_stacks_block(block, transactions, decode_clarity_values, include_contract_abi, ctx) }).collect::>(), + "events": trigger.events.into_iter().map(|event| serialize_stacks_non_consensus_event(event, ctx)).collect::>(), "chainhook": { "uuid": trigger.chainhook.uuid, "predicate": trigger.chainhook.predicate, diff --git a/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json b/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json index 9fb468e6a..55ae6171b 100644 --- a/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json +++ b/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json @@ -1211,5 +1211,6 @@ } ] } - ] + ], + "events": [] } \ No newline at end of file diff --git a/components/chainhook-sdk/src/chainhooks/tests/mod.rs b/components/chainhook-sdk/src/chainhooks/tests/mod.rs index 14b0774b4..7f8b7a7fa 100644 --- a/components/chainhook-sdk/src/chainhooks/tests/mod.rs +++ b/components/chainhook-sdk/src/chainhooks/tests/mod.rs @@ -733,6 +733,7 @@ fn test_stacks_hook_action_noop() { chainhook: &chainhook, apply: vec![(apply_transactions, apply_blocks)], rollback: vec![(rollback_transactions, rollback_blocks)], + events: vec![] }; let proofs = HashMap::new(); @@ -811,6 +812,7 @@ fn test_stacks_hook_action_file_append() { chainhook: &chainhook, apply, rollback: vec![(rollback_transactions, rollback_block)], + events: vec![] }; let proofs = HashMap::new(); diff --git a/components/chainhook-sdk/src/indexer/mod.rs b/components/chainhook-sdk/src/indexer/mod.rs index af484d466..cc257dca5 100644 --- a/components/chainhook-sdk/src/indexer/mod.rs +++ b/components/chainhook-sdk/src/indexer/mod.rs @@ -171,21 +171,32 @@ impl Indexer { pub fn handle_stacks_marshalled_stackerdb_chunk( &mut self, marshalled_stackerdb_chunks: JsonValue, - receipt_time: u64, + receipt_time_ms: u128, ctx: &Context, ) -> Result, String> { - use 
chainhook_types::StacksChainUpdatedWithStackerDbChunksData; - + use chainhook_types::{ + StacksChainUpdatedWithNonConsensusEventsData, StacksNonConsensusEventData, + StacksNonConsensusEventPayloadData, + }; + let Some(chain_tip) = self.stacks_blocks_pool.get_canonical_fork_chain_tip() else { + return Err("StackerDB chunk received with no canonical chain tip".to_string()); + }; let chunks = stacks::standardize_stacks_marshalled_stackerdb_chunks( - &self.config, marshalled_stackerdb_chunks, - receipt_time, - &mut self.stacks_context, ctx, )?; if chunks.len() > 0 { - Ok(Some(StacksChainEvent::ChainUpdatedWithStackerDbChunks( - StacksChainUpdatedWithStackerDbChunksData { chunks }, + Ok(Some(StacksChainEvent::ChainUpdatedWithNonConsensusEvents( + StacksChainUpdatedWithNonConsensusEventsData { + events: chunks + .into_iter() + .map(|chunk| StacksNonConsensusEventData { + payload: StacksNonConsensusEventPayloadData::SignerMessage(chunk), + received_at_ms: receipt_time_ms as u64, + received_at_block: chain_tip.clone(), + }) + .collect(), + }, ))) } else { Ok(None) diff --git a/components/chainhook-sdk/src/indexer/stacks/blocks_pool.rs b/components/chainhook-sdk/src/indexer/stacks/blocks_pool.rs index 5897144c2..80cf44b94 100644 --- a/components/chainhook-sdk/src/indexer/stacks/blocks_pool.rs +++ b/components/chainhook-sdk/src/indexer/stacks/blocks_pool.rs @@ -49,6 +49,15 @@ impl StacksBlockPool { } } + pub fn get_canonical_fork_chain_tip(&self) -> Option<&BlockIdentifier> { + match self.forks.get(&self.canonical_fork_id) { + Some(fork) => { + Some(fork.get_tip()) + }, + None => None, + } + } + pub fn seed_block_pool(&mut self, blocks: Vec, ctx: &Context) { ctx.try_log(|logger| { slog::info!(logger, "Seeding block pool with {} blocks", blocks.len()) diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index c935adbe4..281aa9f12 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ 
b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -12,7 +12,7 @@ use clarity::vm::types::{SequenceData, Value as ClarityValue}; use hiro_system_kit::slog; use rocket::serde::json::Value as JsonValue; use rocket::serde::Deserialize; -use stacks_codec::codec::{NakamotoBlock, StacksTransaction, TransactionAuth, TransactionPayload}; +use stacks_codec::codec::{StacksTransaction, TransactionAuth, TransactionPayload}; use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryInto; use std::io::Cursor; @@ -38,10 +38,10 @@ pub struct NewBlock { #[serde(skip_serializing_if = "Option::is_none")] pub block_time: Option, - + #[serde(skip_serializing_if = "Option::is_none")] pub signer_bitvec: Option, - + #[serde(skip_serializing_if = "Option::is_none")] pub signer_signature: Option>, @@ -305,16 +305,17 @@ pub struct ContractReadonlyCall { #[cfg(feature = "stacks-signers")] #[derive(Deserialize, Debug)] -pub struct NewStackerDbChunkIssuer { - pub issuer_id: u32, - pub slots: Vec, -} +pub struct NewStackerDbChunkIssuerId(pub u32); + +#[cfg(feature = "stacks-signers")] +#[derive(Deserialize, Debug)] +pub struct NewStackerDbChunkIssuerSlots(pub Vec); #[cfg(feature = "stacks-signers")] #[derive(Deserialize, Debug)] pub struct NewStackerDbChunksContractId { pub name: String, - pub issuer: Vec, + pub issuer: (NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots), } #[cfg(feature = "stacks-signers")] @@ -323,7 +324,7 @@ pub struct NewSignerModifiedSlot { pub sig: String, pub data: String, pub slot_id: u64, - pub version: u64, + pub slot_version: u64, } #[cfg(feature = "stacks-signers")] @@ -646,22 +647,18 @@ pub fn standardize_stacks_microblock_trail( #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_marshalled_stackerdb_chunks( - _indexer_config: &IndexerConfig, marshalled_stackerdb_chunks: JsonValue, - receipt_time: u64, - _chain_ctx: &mut StacksChainContext, _ctx: &Context, ) -> Result, String> { let mut stackerdb_chunks: NewStackerDbChunks = 
serde_json::from_value(marshalled_stackerdb_chunks) .map_err(|e| format!("unable to parse stackerdb chunks {e}"))?; - standardize_stacks_stackerdb_chunks(&mut stackerdb_chunks, receipt_time) + standardize_stacks_stackerdb_chunks(&mut stackerdb_chunks) } #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_stackerdb_chunks( stackerdb_chunks: &NewStackerDbChunks, - receipt_time: u64, ) -> Result, String> { use stacks_codec::codec::BlockResponse; use stacks_codec::codec::RejectCode; @@ -683,7 +680,7 @@ pub fn standardize_stacks_stackerdb_chunks( let message = match signer_message { SignerMessage::BlockProposal(block_proposal) => { StacksSignerMessage::BlockProposal(BlockProposalData { - block: standardize_stacks_nakamoto_block(&block_proposal.block), + block: standardize_stacks_nakamoto_block(&block_proposal.block)?, burn_height: block_proposal.burn_height, reward_cycle: block_proposal.reward_cycle, }) @@ -691,8 +688,8 @@ pub fn standardize_stacks_stackerdb_chunks( SignerMessage::BlockResponse(block_response) => match block_response { BlockResponse::Accepted((block_hash, sig)) => StacksSignerMessage::BlockResponse( BlockResponseData::Accepted(BlockAcceptedResponse { - signer_signature_hash: block_hash.to_hex(), - sig: sig.to_hex(), + signer_signature_hash: format!("0x{}", block_hash.to_hex()), + sig: format!("0x{}", sig.to_hex()), }), ), BlockResponse::Rejected(block_rejection) => StacksSignerMessage::BlockResponse( @@ -738,15 +735,18 @@ pub fn standardize_stacks_stackerdb_chunks( } RejectCode::TestingDirective => BlockRejectReasonCode::TestingDirective, }, - signer_signature_hash: block_rejection.signer_signature_hash.to_hex(), + signer_signature_hash: format!( + "0x{}", + block_rejection.signer_signature_hash.to_hex() + ), chain_id: block_rejection.chain_id, - signature: block_rejection.signature.to_hex(), + signature: format!("0x{}", block_rejection.signature.to_hex()), }), ), }, SignerMessage::BlockPushed(nakamoto_block) => { 
StacksSignerMessage::BlockPushed(BlockPushedData { - block: standardize_stacks_nakamoto_block(&nakamoto_block), + block: standardize_stacks_nakamoto_block(&nakamoto_block)?, }) } SignerMessage::MockSignature(_) @@ -757,10 +757,12 @@ pub fn standardize_stacks_stackerdb_chunks( }; parsed_chunks.push(StacksStackerDbChunk { contract: contract_id.clone(), - sig: slot.sig.clone(), - pubkey: get_signer_pubkey_from_stackerdb_chunk_slot(slot, &data_bytes)?, + sig: format!("0x{}", slot.sig), + pubkey: format!( + "0x{}", + get_signer_pubkey_from_stackerdb_chunk_slot(slot, &data_bytes)? + ), message, - receipt_time, }); } @@ -768,36 +770,78 @@ pub fn standardize_stacks_stackerdb_chunks( } #[cfg(feature = "stacks-signers")] -pub fn standardize_stacks_nakamoto_block(block: &NakamotoBlock) -> NakamotoBlockData { +pub fn standardize_stacks_nakamoto_block( + block: &stacks_codec::codec::NakamotoBlock, +) -> Result { use miniscript::bitcoin::hex::Case; use miniscript::bitcoin::hex::DisplayHex; - NakamotoBlockData { + let block_hash = get_nakamoto_block_hash(block)?; + Ok(NakamotoBlockData { header: NakamotoBlockHeaderData { version: block.header.version, chain_length: block.header.chain_length, burn_spent: block.header.burn_spent, - consensus_hash: block.header.consensus_hash.to_hex(), - parent_block_id: block.header.parent_block_id.to_hex(), - tx_merkle_root: block.header.tx_merkle_root.to_hex(), - state_index_root: block.header.state_index_root.to_hex(), + consensus_hash: format!("0x{}", block.header.consensus_hash.to_hex()), + parent_block_id: format!("0x{}", block.header.parent_block_id.to_hex()), + tx_merkle_root: format!("0x{}", block.header.tx_merkle_root.to_hex()), + state_index_root: format!("0x{}", block.header.state_index_root.to_hex()), timestamp: block.header.timestamp, - miner_signature: block.header.miner_signature.to_hex(), + miner_signature: format!("0x{}", block.header.miner_signature.to_hex()), signer_signature: block .header .signer_signature .iter() - .map(|s| 
s.to_hex()) + .map(|s| format!("0x{}", s.to_hex())) .collect(), - pox_treatment: block - .header - .pox_treatment - .serialize_to_vec() - .to_hex_string(Case::Lower), + pox_treatment: format!( + "0x{}", + block + .header + .pox_treatment + .serialize_to_vec() + .to_hex_string(Case::Lower) + ), }, + block_hash: block_hash.clone(), + index_block_hash: get_nakamoto_index_block_hash(&block_hash, &block.header.consensus_hash)?, // TODO(rafaelcr): Parse and return transactions. transactions: vec![], - } + }) +} + +#[cfg(feature = "stacks-signers")] +fn get_nakamoto_block_hash(block: &stacks_codec::codec::NakamotoBlock) -> Result { + use clarity::util::hash::Sha512Trunc256Sum; + + let mut block_header_bytes = vec![block.header.version]; + block_header_bytes.extend(block.header.chain_length.to_be_bytes()); + block_header_bytes.extend(block.header.burn_spent.to_be_bytes()); + block_header_bytes.extend(block.header.consensus_hash.as_bytes()); + block_header_bytes.extend(block.header.parent_block_id.as_bytes()); + block_header_bytes.extend(block.header.tx_merkle_root.as_bytes()); + block_header_bytes.extend(block.header.state_index_root.as_bytes()); + block_header_bytes.extend(block.header.timestamp.to_be_bytes()); + block_header_bytes.extend(block.header.miner_signature.as_bytes()); + block_header_bytes.extend(block.header.pox_treatment.serialize_to_vec()); + + let hash = Sha512Trunc256Sum::from_data(&block_header_bytes).to_bytes(); + Ok(format!("0x{}", hex::encode(hash))) +} + +#[cfg(feature = "stacks-signers")] +fn get_nakamoto_index_block_hash( + block_hash: &String, + consensus_hash: &clarity::types::chainstate::ConsensusHash, +) -> Result { + use clarity::util::hash::Sha512Trunc256Sum; + + let mut bytes = hex::decode(block_hash[2..].to_string()) + .map_err(|e| format!("unable to decode block hash: {e}"))?; + bytes.extend(consensus_hash.as_bytes()); + + let hash = Sha512Trunc256Sum::from_data(&bytes).to_bytes(); + Ok(format!("0x{}", hex::encode(hash))) } #[cfg(feature = 
"stacks-signers")] @@ -815,7 +859,7 @@ pub fn get_signer_pubkey_from_stackerdb_chunk_slot( }; let mut digest_bytes = slot.slot_id.to_be_bytes().to_vec(); - digest_bytes.extend(slot.version.to_be_bytes().to_vec()); + digest_bytes.extend(slot.slot_version.to_be_bytes()); let data_bytes_hashed = Sha512Trunc256Sum::from_data(&data_bytes).to_bytes(); digest_bytes.extend(data_bytes_hashed); let digest = Sha512Trunc256Sum::from_data(&digest_bytes).to_bytes(); diff --git a/components/chainhook-sdk/src/indexer/stacks/tests.rs b/components/chainhook-sdk/src/indexer/stacks/tests.rs index e45be1648..e5ae1bf98 100644 --- a/components/chainhook-sdk/src/indexer/stacks/tests.rs +++ b/components/chainhook-sdk/src/indexer/stacks/tests.rs @@ -413,24 +413,24 @@ fn stackerdb_chunks_covert_into_signer_messages() { "01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc".to_string(), "010074aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b".to_string() ); - let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks, 1729013425).unwrap(); + let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks).unwrap(); assert_eq!(parsed_chunk.len(), 1); let message = &parsed_chunk[0]; assert_eq!(message.contract, "signers-1-1"); assert_eq!( message.pubkey, - "03c76290f48909b4d49e111d69236a138ce96df3e05f709e425153d99f4fe671b4" + "0x03c76290f48909b4d49e111d69236a138ce96df3e05f709e425153d99f4fe671b4" ); - assert_eq!(message.sig, "01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc"); + assert_eq!(message.sig, "0x01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc"); match &message.message { 
StacksSignerMessage::BlockResponse(block_response_data) => match block_response_data { BlockResponseData::Accepted(block_accepted_response) => { - assert_eq!(block_accepted_response.sig, "005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b"); + assert_eq!(block_accepted_response.sig, "0x005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b"); assert_eq!( block_accepted_response.signer_signature_hash, - "74aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986" + "0x74aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986" ); } _ => assert!(false), diff --git a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs index e944b986f..6f8386861 100644 --- a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs +++ b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs @@ -127,21 +127,24 @@ pub fn create_new_stackerdb_chunk( slot_data: String, ) -> crate::indexer::stacks::NewStackerDbChunks { use crate::indexer::stacks::{ - NewSignerModifiedSlot, NewStackerDbChunkIssuer, NewStackerDbChunksContractId, + NewSignerModifiedSlot, NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots, + NewStackerDbChunksContractId, }; crate::indexer::stacks::NewStackerDbChunks { contract_id: NewStackerDbChunksContractId { name: contract_name, - issuer: vec![NewStackerDbChunkIssuer { - issuer_id: 26, - slots: vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }], + issuer: ( + NewStackerDbChunkIssuerId(26), + NewStackerDbChunkIssuerSlots(vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]), + ), }, modified_slots: vec![NewSignerModifiedSlot { sig: slot_sig, data: slot_data, slot_id: 1, - version: 141, + slot_version: 141, }], } } diff --git 
a/components/chainhook-sdk/src/observer/http.rs b/components/chainhook-sdk/src/observer/http.rs index 7468fa4a1..5a5938f51 100644 --- a/components/chainhook-sdk/src/observer/http.rs +++ b/components/chainhook-sdk/src/observer/http.rs @@ -178,8 +178,8 @@ pub fn handle_new_stacks_block( success_response() } -#[cfg(feature = "stacks-signers")] #[post("/stackerdb_chunks", format = "application/json", data = "")] +#[cfg(feature = "stacks-signers")] pub fn handle_stackerdb_chunks( indexer_rw_lock: &State>>, payload: Json, @@ -196,7 +196,7 @@ pub fn handle_stackerdb_chunks( }; let chain_event = match indexer_rw_lock.inner().write() { Ok(mut indexer) => indexer - .handle_stacks_marshalled_stackerdb_chunk(payload.into_inner(), epoch.as_secs(), ctx), + .handle_stacks_marshalled_stackerdb_chunk(payload.into_inner(), epoch.as_millis(), ctx), Err(e) => { return error_response(format!("Unable to acquire background_job_tx: {e}"), ctx); } diff --git a/components/chainhook-sdk/src/observer/mod.rs b/components/chainhook-sdk/src/observer/mod.rs index 7ac7d2b72..08f8248af 100644 --- a/components/chainhook-sdk/src/observer/mod.rs +++ b/components/chainhook-sdk/src/observer/mod.rs @@ -885,7 +885,7 @@ pub fn start_event_observer( pub async fn start_bitcoin_event_observer( config: EventObserverConfig, - observer_commands_tx: Sender, + _observer_commands_tx: Sender, observer_commands_rx: Receiver, observer_events_tx: Option>, observer_sidecar: Option, @@ -897,7 +897,8 @@ pub async fn start_bitcoin_event_observer( let ctx_moved = ctx.clone(); let config_moved = config.clone(); let _ = hiro_system_kit::thread_named("ZMQ handler").spawn(move || { - let future = zmq::start_zeromq_runloop(&config_moved, observer_commands_tx, &ctx_moved); + let future = + zmq::start_zeromq_runloop(&config_moved, _observer_commands_tx, &ctx_moved); hiro_system_kit::nestable_block_on(future); }); } @@ -1658,12 +1659,20 @@ pub async fn start_observer_commands_handler( report.track_expiration(uuid, 
block_identifier); } for entry in predicates_triggered.iter() { - let blocks_ids = entry + let mut block_ids = entry .apply .iter() .map(|e| e.1.get_identifier()) .collect::>(); - report.track_trigger(&entry.chainhook.uuid, &blocks_ids); + let mut event_block_ids = entry + .events + .iter() + .map(|e| &e.received_at_block) + .collect::>(); + if event_block_ids.len() > 0 { + block_ids.append(&mut event_block_ids); + } + report.track_trigger(&entry.chainhook.uuid, &block_ids); } ctx.try_log(|logger| { slog::info!( diff --git a/components/chainhook-sdk/src/utils/mod.rs b/components/chainhook-sdk/src/utils/mod.rs index 4de1df404..b74f2edbe 100644 --- a/components/chainhook-sdk/src/utils/mod.rs +++ b/components/chainhook-sdk/src/utils/mod.rs @@ -6,8 +6,7 @@ use std::{ }; use chainhook_types::{ - BitcoinBlockData, BlockHeader, BlockIdentifier, StacksBlockData, StacksMicroblockData, - StacksTransactionData, + BitcoinBlockData, BlockHeader, BlockIdentifier, StacksBlockData, StacksMicroblockData, StacksTransactionData }; use hiro_system_kit::slog::{self, Logger}; use reqwest::RequestBuilder; diff --git a/components/chainhook-types-rs/src/rosetta.rs b/components/chainhook-types-rs/src/rosetta.rs index ac75846a3..9acb4c250 100644 --- a/components/chainhook-types-rs/src/rosetta.rs +++ b/components/chainhook-types-rs/src/rosetta.rs @@ -664,6 +664,19 @@ pub struct BlockchainUpdatedWithReorg { pub confirmed_headers: Vec, } +#[derive(Clone, Debug, PartialEq, Serialize)] +#[serde(tag = "type", content = "data")] +pub enum StacksNonConsensusEventPayloadData { + SignerMessage(StacksStackerDbChunk), +} + +#[derive(Clone, Debug, PartialEq, Serialize)] +pub struct StacksNonConsensusEventData { + pub payload: StacksNonConsensusEventPayloadData, + pub received_at_ms: u64, + pub received_at_block: BlockIdentifier, +} + #[derive(Debug, Clone, PartialEq, Serialize)] pub struct BlockHeader { pub block_identifier: BlockIdentifier, @@ -691,8 +704,8 @@ pub struct BitcoinChainUpdatedWithReorgData 
{ } #[derive(Debug, Clone, PartialEq, Serialize)] -pub struct StacksChainUpdatedWithStackerDbChunksData { - pub chunks: Vec, +pub struct StacksChainUpdatedWithNonConsensusEventsData { + pub events: Vec, } #[allow(dead_code)] @@ -702,7 +715,7 @@ pub enum StacksChainEvent { ChainUpdatedWithReorg(StacksChainUpdatedWithReorgData), ChainUpdatedWithMicroblocks(StacksChainUpdatedWithMicroblocksData), ChainUpdatedWithMicroblocksReorg(StacksChainUpdatedWithMicroblocksReorgData), - ChainUpdatedWithStackerDbChunks(StacksChainUpdatedWithStackerDbChunksData), + ChainUpdatedWithNonConsensusEvents(StacksChainUpdatedWithNonConsensusEventsData), } impl StacksChainEvent { @@ -732,7 +745,7 @@ impl StacksChainEvent { .microblocks_to_apply .first() .and_then(|b| Some(&b.metadata.anchor_block_identifier)), - StacksChainEvent::ChainUpdatedWithStackerDbChunks(_) => None, + StacksChainEvent::ChainUpdatedWithNonConsensusEvents(_) => None, } } } diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs index 96d1f3ea7..131a32b67 100644 --- a/components/chainhook-types-rs/src/signers.rs +++ b/components/chainhook-types-rs/src/signers.rs @@ -18,12 +18,13 @@ pub struct NakamotoBlockHeaderData { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct NakamotoBlockData { pub header: NakamotoBlockHeaderData, + pub block_hash: String, + pub index_block_hash: String, pub transactions: Vec, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct BlockProposalData { - // TODO(rafaelcr): Include `block_hash` and `index_block_hash`. 
pub block: NakamotoBlockData, pub burn_height: u64, pub reward_cycle: u64, @@ -93,5 +94,4 @@ pub struct StacksStackerDbChunk { pub sig: String, pub pubkey: String, pub message: StacksSignerMessage, - pub receipt_time: u64, } diff --git a/components/client/typescript/package-lock.json b/components/client/typescript/package-lock.json index 8011fe4b0..55291fdc9 100644 --- a/components/client/typescript/package-lock.json +++ b/components/client/typescript/package-lock.json @@ -1,12 +1,12 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.0.0", + "version": "2.1.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@hirosystems/chainhook-client", - "version": "2.0.0", + "version": "2.1.1", "license": "Apache 2.0", "dependencies": { "@fastify/type-provider-typebox": "^3.2.0", diff --git a/components/client/typescript/package.json b/components/client/typescript/package.json index f93033738..5cb41df87 100644 --- a/components/client/typescript/package.json +++ b/components/client/typescript/package.json @@ -1,6 +1,6 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.0.0", + "version": "2.1.1", "description": "Chainhook TypeScript client", "main": "./dist/index.js", "typings": "./dist/index.d.ts", diff --git a/components/client/typescript/src/schemas/stacks/if_this.ts b/components/client/typescript/src/schemas/stacks/if_this.ts index 9f3f709eb..d23689bdb 100644 --- a/components/client/typescript/src/schemas/stacks/if_this.ts +++ b/components/client/typescript/src/schemas/stacks/if_this.ts @@ -74,6 +74,27 @@ export type StacksIfThisContractDeploymentTrait = Static< typeof StacksIfThisContractDeploymentTraitSchema >; +export const StacksIfThisSignerMessageAfterTimestampSchema = Type.Object({ + scope: Type.Literal('signer_message'), + after_timestamp: Type.Integer(), +}); +export type StacksIfThisSignerMessageAfterTimestamp = Static< + typeof StacksIfThisSignerMessageAfterTimestampSchema +>; + +export const 
StacksIfThisSignerMessageSignerPubKeySchema = Type.Object({ + scope: Type.Literal('signer_message'), + signer_pubkey: Type.String(), +}); +export type StacksIfThisSignerMessageSignerPubKey = Static< + typeof StacksIfThisSignerMessageSignerPubKeySchema +>; + +export const StacksIfThisSignerMessageSchema = Type.Union([ + StacksIfThisSignerMessageAfterTimestampSchema, +]); +export type StacksIfThisSignerMessage = Static; + export const StacksIfThisOptionsSchema = Type.Object({ start_block: Type.Optional(Type.Integer()), end_block: Type.Optional(Type.Integer()), @@ -93,6 +114,7 @@ export const StacksIfThisSchema = Type.Union([ StacksIfThisContractCallSchema, StacksIfThisContractDeploymentSchema, StacksIfThisContractDeploymentTraitSchema, + StacksIfThisSignerMessageSchema, ]); export type StacksIfThis = Static; diff --git a/components/client/typescript/src/schemas/stacks/payload.ts b/components/client/typescript/src/schemas/stacks/payload.ts index ef5969eb3..3bf1a980a 100644 --- a/components/client/typescript/src/schemas/stacks/payload.ts +++ b/components/client/typescript/src/schemas/stacks/payload.ts @@ -102,7 +102,11 @@ export const StacksEventSchema = Type.Object({ }); export type StacksEvent = Static; -export const StacksNonConsensusEventSchema = Type.Union([StacksSignerMessageEventSchema]); +export const StacksNonConsensusEventSchema = Type.Object({ + payload: Type.Union([StacksSignerMessageEventSchema]), + received_at_ms: Type.Integer(), + received_at_block: BlockIdentifierSchema, +}); export type StacksNonConsensusEvent = Static; export const StacksPayloadSchema = Type.Object({ diff --git a/components/client/typescript/src/schemas/stacks/signers.ts b/components/client/typescript/src/schemas/stacks/signers.ts index 701486901..caa0e2e3a 100644 --- a/components/client/typescript/src/schemas/stacks/signers.ts +++ b/components/client/typescript/src/schemas/stacks/signers.ts @@ -1,6 +1,4 @@ import { Static, Type } from '@fastify/type-provider-typebox'; -import { 
BlockIdentifierSchema } from '../common'; -import { StacksTransactionSchema } from './payload'; export const StacksNakamotoBlockHeaderSchema = Type.Object({ version: Type.Integer(), @@ -19,7 +17,10 @@ export type StacksNakamotoBlockHeader = Static; @@ -94,11 +95,12 @@ export const StacksSignerMessageSchema = Type.Union([ export type StacksSignerMessage = Static; export const StacksSignerMessageEventSchema = Type.Object({ - contract: Type.String(), - sig: Type.String(), - pubkey: Type.String(), - message: StacksSignerMessageSchema, - received_at: Type.Integer(), - received_at_block: BlockIdentifierSchema, + type: Type.Literal('SignerMessage'), + data: Type.Object({ + contract: Type.String(), + sig: Type.String(), + pubkey: Type.String(), + message: StacksSignerMessageSchema, + }), }); export type StacksSignerMessageEvent = Static; diff --git a/docs/chainhook-openapi.json b/docs/chainhook-openapi.json index 989f3c230..c8f04fdcd 100644 --- a/docs/chainhook-openapi.json +++ b/docs/chainhook-openapi.json @@ -1207,6 +1207,48 @@ ] } } + }, + { + "type": "object", + "oneOf": [ + { + "type": "object", + "required": [ + "after_timestamp" + ], + "properties": { + "after_timestamp": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "from_signer_pub_key" + ], + "properties": { + "from_signer_pub_key": { + "type": "string" + } + }, + "additionalProperties": false + } + ], + "required": [ + "scope" + ], + "properties": { + "scope": { + "type": "string", + "enum": [ + "signer_message" + ] + } + } } ] }, From cc93873dacc88e5df97d1006394b99744e63009b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Thu, 24 Oct 2024 11:50:41 -0600 Subject: [PATCH 05/25] fix: signer pubkey calculation (#665) Fixes the `slot_id` and `slot_version` field sizes so the signer pubkey is calculated correctly --- .../chainhook-sdk/src/indexer/stacks/mod.rs | 4 +- 
.../chainhook-sdk/src/indexer/stacks/tests.rs | 43 ++++++++++++------- .../indexer/tests/helpers/stacks_events.rs | 29 ------------- 3 files changed, 30 insertions(+), 46 deletions(-) diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 21109f4e8..1d6a4f844 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -326,8 +326,8 @@ pub struct NewStackerDbChunksContractId { pub struct NewSignerModifiedSlot { pub sig: String, pub data: String, - pub slot_id: u64, - pub slot_version: u64, + pub slot_id: u32, + pub slot_version: u32, } #[cfg(feature = "stacks-signers")] diff --git a/components/chainhook-sdk/src/indexer/stacks/tests.rs b/components/chainhook-sdk/src/indexer/stacks/tests.rs index e5ae1bf98..493f6fd5e 100644 --- a/components/chainhook-sdk/src/indexer/stacks/tests.rs +++ b/components/chainhook-sdk/src/indexer/stacks/tests.rs @@ -401,36 +401,49 @@ fn into_chainhook_event_rejects_invalid_missing_event() { #[test] #[cfg(feature = "stacks-signers")] -fn stackerdb_chunks_covert_into_signer_messages() { +fn parses_block_response_signer_message() { use chainhook_types::{BlockResponseData, StacksSignerMessage}; - use crate::indexer::tests::helpers::stacks_events::create_new_stackerdb_chunk; + use crate::indexer::stacks::{ + NewSignerModifiedSlot, NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots, + NewStackerDbChunks, NewStackerDbChunksContractId, + }; use super::standardize_stacks_stackerdb_chunks; - let new_chunks = create_new_stackerdb_chunk( - "signers-1-1".to_string(), - "01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc".to_string(), - 
"010074aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b".to_string() - ); + let new_chunks = NewStackerDbChunks { + contract_id: NewStackerDbChunksContractId { + name: "signers-0-1".to_string(), + issuer: ( + NewStackerDbChunkIssuerId(26), + NewStackerDbChunkIssuerSlots(vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + ), + }, + modified_slots: vec![NewSignerModifiedSlot { + sig: "01060cc1bef9ccfe7139f5240ff5c33c44c83206e851e21b63234a996654f70d750b44d9c76466a5c45515b63183dfcfaefe5877fbd3593859e50d5df39cd469a1".to_string(), + data: "01008f913dd2bcc2cfbd1c82166e0ad99230f76de098a5ba6ee1b15b042c8f67c6f000a1c66742e665e981d10f7a70a5df312c9cba729331129ff1b510e71133d79c0122b25266bf47e8c1c923b4fde0464756ced884030e9983f797c902961fc9b0b10000005d737461636b732d7369676e657220302e302e3120283a646431656265363436303366353464616534383535386135643832643962643838356539376130312c206465627567206275696c642c206c696e7578205b616172636836345d29".to_string(), + slot_id: 1, + slot_version: 11, + }], + }; let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks).unwrap(); assert_eq!(parsed_chunk.len(), 1); let message = &parsed_chunk[0]; - assert_eq!(message.contract, "signers-1-1"); + assert_eq!(message.contract, "signers-0-1"); assert_eq!( message.pubkey, - "0x03c76290f48909b4d49e111d69236a138ce96df3e05f709e425153d99f4fe671b4" + "0x028efa20fa5706567008ebaf48f7ae891342eeb944d96392f719c505c89f84ed8d" ); - assert_eq!(message.sig, "0x01fc3c06f6e0ae5b13c9bb53763661817e55c8e7f1ecab8b4d4b65b283d2dd39f0099e3ea1e25e765f4f0e1dfb0a432309a16a2ec10940e1a14cb9e9b1cbf27edc"); + assert_eq!(message.sig, "0x01060cc1bef9ccfe7139f5240ff5c33c44c83206e851e21b63234a996654f70d750b44d9c76466a5c45515b63183dfcfaefe5877fbd3593859e50d5df39cd469a1"); match &message.message { - StacksSignerMessage::BlockResponse(block_response_data) => match 
block_response_data { - BlockResponseData::Accepted(block_accepted_response) => { - assert_eq!(block_accepted_response.sig, "0x005eb55250597b25acbf99d3dd3c2fa8189046e1b5d21309a44cbaf2b327c09b0159a01ed3f0094bfa9e5f72f5d894e12ce252081eab5396eb8bba137bddfc365b"); + StacksSignerMessage::BlockResponse(response) => match response { + BlockResponseData::Accepted(accepted) => { + assert_eq!(accepted.sig, "0x00a1c66742e665e981d10f7a70a5df312c9cba729331129ff1b510e71133d79c0122b25266bf47e8c1c923b4fde0464756ced884030e9983f797c902961fc9b0b1"); assert_eq!( - block_accepted_response.signer_signature_hash, - "0x74aff146904763a787aa14c614d0dd1fc63b537bdb2fd351cdf881f6db75f986" + accepted.signer_signature_hash, + "0x8f913dd2bcc2cfbd1c82166e0ad99230f76de098a5ba6ee1b15b042c8f67c6f0" ); } _ => assert!(false), diff --git a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs index 6f8386861..4ca977bc5 100644 --- a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs +++ b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_events.rs @@ -119,32 +119,3 @@ pub fn create_new_event_from_stacks_event(event: StacksTransactionEventPayload) contract_event, } } - -#[cfg(feature = "stacks-signers")] -pub fn create_new_stackerdb_chunk( - contract_name: String, - slot_sig: String, - slot_data: String, -) -> crate::indexer::stacks::NewStackerDbChunks { - use crate::indexer::stacks::{ - NewSignerModifiedSlot, NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots, - NewStackerDbChunksContractId, - }; - crate::indexer::stacks::NewStackerDbChunks { - contract_id: NewStackerDbChunksContractId { - name: contract_name, - issuer: ( - NewStackerDbChunkIssuerId(26), - NewStackerDbChunkIssuerSlots(vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]), - ), - }, - modified_slots: vec![NewSignerModifiedSlot { - sig: slot_sig, - data: slot_data, - slot_id: 1, - slot_version: 141, - }], - } 
-} From b5ad4ba11dd72722bb6cbe936ec29411cde9a606 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 24 Oct 2024 19:53:58 +0200 Subject: [PATCH 06/25] feat: include recovered signer pubkeys in new block payload (#662) --- .../service/tests/helpers/mock_stacks_node.rs | 1 + .../fixtures/stacks/testnet/occurrence.json | 42 ++++++++++----- .../chainhook-sdk/src/indexer/stacks/mod.rs | 54 +++++++++++++++++++ .../indexer/tests/helpers/stacks_blocks.rs | 1 + components/chainhook-types-js/src/index.ts | 1 + components/chainhook-types-rs/src/rosetta.rs | 1 + .../typescript/src/schemas/stacks/payload.ts | 1 + 7 files changed, 87 insertions(+), 14 deletions(-) diff --git a/components/chainhook-cli/src/service/tests/helpers/mock_stacks_node.rs b/components/chainhook-cli/src/service/tests/helpers/mock_stacks_node.rs index 883853689..f4e064e53 100644 --- a/components/chainhook-cli/src/service/tests/helpers/mock_stacks_node.rs +++ b/components/chainhook-cli/src/service/tests/helpers/mock_stacks_node.rs @@ -264,6 +264,7 @@ pub fn create_stacks_new_block( tenure_height: Some(1122), signer_bitvec: Some("000800000001ff".to_owned()), signer_signature: Some(vec!["1234".to_owned(), "2345".to_owned()]), + signer_signature_hash: None, cycle_number: Some(1), reward_set: Some(RewardSet { pox_ustx_threshold: "50000".to_owned(), diff --git a/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json b/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json index d1f9474b3..51ce27c9e 100644 --- a/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json +++ b/components/chainhook-sdk/src/chainhooks/tests/fixtures/stacks/testnet/occurrence.json @@ -20,7 +20,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -108,7 +109,8 @@ "cycle_number": null, 
"reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -195,7 +197,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -283,7 +286,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -370,7 +374,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -459,7 +464,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -547,7 +553,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -635,7 +642,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -724,7 +732,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -812,7 +821,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -900,7 +910,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": 
null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -988,7 +999,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -1076,7 +1088,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", @@ -1174,7 +1187,8 @@ "cycle_number": null, "reward_set": null, "signer_bitvec": null, - "signer_signature": null + "signer_signature": null, + "signer_public_keys": null }, "parent_block_identifier": { "hash": "0x", diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 1d6a4f844..7478dddd8 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -45,6 +45,9 @@ pub struct NewBlock { #[serde(skip_serializing_if = "Option::is_none")] pub signer_bitvec: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub signer_signature_hash: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub signer_signature: Option>, @@ -472,6 +475,13 @@ pub fn standardize_stacks_block( }) }; + let signer_sig_hash = block + .signer_signature_hash + .as_ref() + .map(|hash| { + hex::decode(&hash[2..]).expect("unable to decode signer_signature hex") + }); + let block = StacksBlockData { block_identifier: BlockIdentifier { hash: block.index_block_hash.clone(), @@ -502,6 +512,20 @@ pub fn standardize_stacks_block( signer_bitvec: block.signer_bitvec.clone(), signer_signature: block.signer_signature.clone(), + signer_public_keys: match (signer_sig_hash, &block.signer_signature) { + (Some(signer_sig_hash), Some(signatures)) => { + Some(signatures.iter().map(|sig_hex| { + let sig_msg = 
clarity::util::secp256k1::MessageSignature::from_hex(sig_hex) + .map_err(|e| format!("unable to parse signer signature message: {}", e))?; + let pubkey = get_signer_pubkey_from_message_hash(&signer_sig_hash, &sig_msg) + .map_err(|e| format!("unable to recover signer sig pubkey: {}", e))?; + Ok(format!("0x{}", hex::encode(pubkey))) + }) + .collect::, String>>()?) + } + _ => None, + }, + cycle_number: block.cycle_number, reward_set: block.reward_set.as_ref().and_then(|r| { Some(StacksBlockMetadataRewardSet { @@ -848,6 +872,36 @@ fn get_nakamoto_index_block_hash( Ok(format!("0x{}", hex::encode(hash))) } +pub fn get_signer_pubkey_from_message_hash( + message_hash: &Vec, + signature: &clarity::util::secp256k1::MessageSignature, +) -> Result<[u8; 33], String> { + use miniscript::bitcoin::{ + key::Secp256k1, + secp256k1::{ + ecdsa::{RecoverableSignature, RecoveryId}, + Message, + }, + }; + + let (first, sig) = signature.0.split_at(1); + let rec_id = first[0]; + + let secp = Secp256k1::new(); + let recovery_id = + RecoveryId::from_i32(rec_id as i32).map_err(|e| format!("invalid recovery id: {e}"))?; + let signature = RecoverableSignature::from_compact(&sig, recovery_id) + .map_err(|e| format!("invalid signature: {e}"))?; + let message = + Message::from_digest_slice(&message_hash).map_err(|e| format!("invalid digest message: {e}"))?; + + let pubkey = secp + .recover_ecdsa(&message, &signature) + .map_err(|e| format!("unable to recover pubkey: {e}"))?; + + Ok(pubkey.serialize()) +} + #[cfg(feature = "stacks-signers")] pub fn get_signer_pubkey_from_stackerdb_chunk_slot( slot: &NewSignerModifiedSlot, diff --git a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_blocks.rs b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_blocks.rs index 71123b9fe..fcbd1e795 100644 --- a/components/chainhook-sdk/src/indexer/tests/helpers/stacks_blocks.rs +++ b/components/chainhook-sdk/src/indexer/tests/helpers/stacks_blocks.rs @@ -76,6 +76,7 @@ pub fn 
generate_test_stacks_block( tenure_height: Some(1122), signer_bitvec: Some("1010101010101".to_owned()), signer_signature: Some(vec!["1234".to_owned(), "2345".to_owned()]), + signer_public_keys: Some(vec!["12".to_owned(), "23".to_owned()]), cycle_number: Some(1), reward_set: Some(StacksBlockMetadataRewardSet { pox_ustx_threshold: "50000".to_owned(), diff --git a/components/chainhook-types-js/src/index.ts b/components/chainhook-types-js/src/index.ts index 8f6251d32..5c51c5308 100644 --- a/components/chainhook-types-js/src/index.ts +++ b/components/chainhook-types-js/src/index.ts @@ -699,6 +699,7 @@ export interface StacksBlockMetadata { tenure_height?: number | null; signer_bitvec?: string | null; signer_signature?: string[] | null; + signer_public_keys?: string[] | null; cycle_number?: number | null; reward_set?: { pox_ustx_threshold: string; diff --git a/components/chainhook-types-rs/src/rosetta.rs b/components/chainhook-types-rs/src/rosetta.rs index ee9066b9f..6af1972b4 100644 --- a/components/chainhook-types-rs/src/rosetta.rs +++ b/components/chainhook-types-rs/src/rosetta.rs @@ -119,6 +119,7 @@ pub struct StacksBlockMetadata { pub block_time: Option, pub signer_bitvec: Option, pub signer_signature: Option>, + pub signer_public_keys: Option>, // Available starting in epoch3, only included in blocks where the pox cycle rewards are first calculated pub cycle_number: Option, diff --git a/components/client/typescript/src/schemas/stacks/payload.ts b/components/client/typescript/src/schemas/stacks/payload.ts index e25974bf8..b87683167 100644 --- a/components/client/typescript/src/schemas/stacks/payload.ts +++ b/components/client/typescript/src/schemas/stacks/payload.ts @@ -74,6 +74,7 @@ export const StacksEventMetadataSchema = Type.Object({ block_time: Nullable(Type.Integer()), signer_bitvec: Nullable(Type.String()), signer_signature: Nullable(Type.Array(Type.String())), + signer_public_keys: Nullable(Type.Array(Type.String())), // Available starting in epoch3, only 
included in blocks where the pox cycle rewards are first calculated cycle_number: Nullable(Type.Integer()), From 71364c15a88a06d5c71b3f3d3c88c489ff4733d1 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 24 Oct 2024 19:54:28 +0200 Subject: [PATCH 07/25] fix: update SignerMessage deserializing (#663) --- .vscode/chainhook.toml | 46 +++++++++++++++++++ .vscode/launch.json | 24 ++++++++++ Cargo.lock | 4 +- Cargo.toml | 2 +- .../src/chainhooks/stacks/mod.rs | 7 ++- .../chainhook-sdk/src/indexer/stacks/mod.rs | 18 +++++--- .../chainhook-sdk/src/indexer/stacks/tests.rs | 2 +- components/chainhook-types-rs/src/signers.rs | 16 ++++++- .../typescript/src/schemas/stacks/signers.ts | 20 +++++++- 9 files changed, 123 insertions(+), 16 deletions(-) create mode 100644 .vscode/chainhook.toml diff --git a/.vscode/chainhook.toml b/.vscode/chainhook.toml new file mode 100644 index 000000000..a01390bea --- /dev/null +++ b/.vscode/chainhook.toml @@ -0,0 +1,46 @@ +[storage] +working_dir = "cache" + +# The HTTP API allows you to register / deregister +# predicates dynamically. +# This is disabled by default. +# +[http_api] +http_port = 20456 +database_uri = "redis://localhost:6379/" + +[network] +mode = "testnet" +bitcoind_rpc_url = "http://localhost:18443" +bitcoind_rpc_username = "btc" +bitcoind_rpc_password = "btc" + +# Chainhook must be able to receive Bitcoin block events. +# These events can originate from either a Stacks node or a Bitcoin node's ZeroMQ interface. + +# By default, the service is set to receive Bitcoin block events from the Stacks node: +stacks_node_rpc_url = "http://localhost:20443" +stacks_events_ingestion_port = 20455 + +# However, events can also be received directly from a Bitcoin node. 
+# To achieve this, comment out the `stacks_node_rpc_url` line and uncomment the following line: +# bitcoind_zmq_url = "tcp://0.0.0.0:18543" + +[limits] +max_number_of_bitcoin_predicates = 100 +max_number_of_concurrent_bitcoin_scans = 100 +max_number_of_stacks_predicates = 10 +max_number_of_concurrent_stacks_scans = 10 +max_number_of_processing_threads = 16 +max_number_of_networking_threads = 16 +max_caching_memory_size_mb = 32000 + +# The TSV file is required for downloading historical data for your predicates. +# If this is not a requirement, you can comment out the `tsv_file_url` line. +# [[event_source]] +# tsv_file_url = "https://archive.hiro.so/regtest/stacks-blockchain-api/regtest-stacks-blockchain-api-latest" + +# Enables a server that provides metrics that can be scraped by Prometheus. +# This is disabled by default. +# [monitoring] +# prometheus_monitoring_port = 20457 diff --git a/.vscode/launch.json b/.vscode/launch.json index 448c74c77..9d0644e2c 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,6 +1,30 @@ { "version": "0.2.0", "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'chainhook'", + "cargo": { + "args": [ + "build", + "--bin=chainhook", + "--package=chainhook" + ], + "filter": { + "name": "chainhook", + "kind": "bin" + } + }, + "args": [ + "service", + "start", + "--config-path=${workspaceFolder}/.vscode/chainhook.toml", + ], + "cwd": "${workspaceFolder}", + "preLaunchTask": "redis:start", + "postDebugTask": "redis:stop" + }, { "type": "lldb", "request": "launch", diff --git a/Cargo.lock b/Cargo.lock index 23d5c23e3..989eeb269 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3549,8 +3549,8 @@ dependencies = [ [[package]] name = "stacks-codec" -version = "2.9.0" -source = "git+https://github.com/hirosystems/clarinet.git?rev=b0683675115562d719ed4b5245f620e0990030a0#b0683675115562d719ed4b5245f620e0990030a0" +version = "2.10.0" +source = 
"git+https://github.com/hirosystems/clarinet.git?rev=fcebfb5a986ded32d5a450c34f8e5e5f2da97de4#fcebfb5a986ded32d5a450c34f8e5e5f2da97de4" dependencies = [ "clarity", "serde", diff --git a/Cargo.toml b/Cargo.toml index e7694ce18..97b5fc91f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ default-members = ["components/chainhook-cli", "components/chainhook-sdk"] resolver = "2" [patch.crates-io] -stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "b0683675115562d719ed4b5245f620e0990030a0" } +stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "fcebfb5a986ded32d5a450c34f8e5e5f2da97de4" } diff --git a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs index 1eb2c7fbf..a59344ea4 100644 --- a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs +++ b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs @@ -873,7 +873,10 @@ pub fn evaluate_stacks_predicate_on_non_consensus_events<'a>( | StacksPredicate::NftEvent(_) | StacksPredicate::StxEvent(_) | StacksPredicate::PrintEvent(_) - | StacksPredicate::Txid(_) => unreachable!(), + | StacksPredicate::Txid(_) => { + // Ignore, possibly expected behavior? 
+ // https://github.com/hirosystems/chainhook/pull/663#discussion_r1814995429 + }, }; } (occurrences, expired_predicates) @@ -1107,7 +1110,7 @@ fn serialize_stacks_non_consensus_event( }; json!({ "payload": payload, - "received_at": event.received_at_ms, + "received_at_ms": event.received_at_ms, "received_at_block": event.received_at_block, }) } diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 7478dddd8..7ebf90c16 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -714,10 +714,13 @@ pub fn standardize_stacks_stackerdb_chunks( }) } SignerMessage::BlockResponse(block_response) => match block_response { - BlockResponse::Accepted((block_hash, sig)) => StacksSignerMessage::BlockResponse( + BlockResponse::Accepted(block_accepted) => StacksSignerMessage::BlockResponse( BlockResponseData::Accepted(BlockAcceptedResponse { - signer_signature_hash: format!("0x{}", block_hash.to_hex()), - sig: format!("0x{}", sig.to_hex()), + signer_signature_hash: format!("0x{}", block_accepted.signer_signature_hash.to_hex()), + signature: format!("0x{}", block_accepted.signature.to_hex()), + metadata: SignerMessageMetadata { + server_version: block_accepted.metadata.server_version, + } }), ), BlockResponse::Rejected(block_rejection) => StacksSignerMessage::BlockResponse( @@ -725,8 +728,8 @@ pub fn standardize_stacks_stackerdb_chunks( reason: block_rejection.reason, reason_code: match block_rejection.reason_code { RejectCode::ValidationFailed(validate_reject_code) => { - BlockRejectReasonCode::ValidationFailed( - match validate_reject_code { + BlockRejectReasonCode::ValidationFailed { + validation_failed: match validate_reject_code { ValidateRejectCode::BadBlockHash => { BlockValidationFailedCode::BadBlockHash } @@ -749,7 +752,7 @@ pub fn standardize_stacks_stackerdb_chunks( BlockValidationFailedCode::NoSuchTenure } }, - ) + } } 
RejectCode::NoSortitionView => BlockRejectReasonCode::NoSortitionView, RejectCode::ConnectivityIssues => { @@ -769,6 +772,9 @@ pub fn standardize_stacks_stackerdb_chunks( ), chain_id: block_rejection.chain_id, signature: format!("0x{}", block_rejection.signature.to_hex()), + metadata: SignerMessageMetadata { + server_version: block_rejection.metadata.server_version, + }, }), ), }, diff --git a/components/chainhook-sdk/src/indexer/stacks/tests.rs b/components/chainhook-sdk/src/indexer/stacks/tests.rs index 493f6fd5e..f4a005c97 100644 --- a/components/chainhook-sdk/src/indexer/stacks/tests.rs +++ b/components/chainhook-sdk/src/indexer/stacks/tests.rs @@ -440,7 +440,7 @@ fn parses_block_response_signer_message() { match &message.message { StacksSignerMessage::BlockResponse(response) => match response { BlockResponseData::Accepted(accepted) => { - assert_eq!(accepted.sig, "0x00a1c66742e665e981d10f7a70a5df312c9cba729331129ff1b510e71133d79c0122b25266bf47e8c1c923b4fde0464756ced884030e9983f797c902961fc9b0b1"); + assert_eq!(accepted.signature, "0x00a1c66742e665e981d10f7a70a5df312c9cba729331129ff1b510e71133d79c0122b25266bf47e8c1c923b4fde0464756ced884030e9983f797c902961fc9b0b1"); assert_eq!( accepted.signer_signature_hash, "0x8f913dd2bcc2cfbd1c82166e0ad99230f76de098a5ba6ee1b15b042c8f67c6f0" diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs index 131a32b67..c0754dd28 100644 --- a/components/chainhook-types-rs/src/signers.rs +++ b/components/chainhook-types-rs/src/signers.rs @@ -33,10 +33,17 @@ pub struct BlockProposalData { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct BlockAcceptedResponse { pub signer_signature_hash: String, - pub sig: String, + pub signature: String, + pub metadata: SignerMessageMetadata, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct SignerMessageMetadata { + pub server_version: String, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] 
+#[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum BlockValidationFailedCode { BadBlockHash, BadTransaction, @@ -50,7 +57,11 @@ pub enum BlockValidationFailedCode { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum BlockRejectReasonCode { - ValidationFailed(BlockValidationFailedCode), + #[serde(rename_all = "SCREAMING_SNAKE_CASE")] + ValidationFailed { + #[serde(rename = "VALIDATION_FAILED")] + validation_failed: BlockValidationFailedCode, + }, ConnectivityIssues, RejectedInPriorRound, NoSortitionView, @@ -65,6 +76,7 @@ pub struct BlockRejectedResponse { pub signer_signature_hash: String, pub chain_id: u32, pub signature: String, + pub metadata: SignerMessageMetadata, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] diff --git a/components/client/typescript/src/schemas/stacks/signers.ts b/components/client/typescript/src/schemas/stacks/signers.ts index caa0e2e3a..7b9ceeb8f 100644 --- a/components/client/typescript/src/schemas/stacks/signers.ts +++ b/components/client/typescript/src/schemas/stacks/signers.ts @@ -40,7 +40,10 @@ export const StacksSignerMessageBlockResponseAcceptedSchema = Type.Object({ type: Type.Literal('Accepted'), data: Type.Object({ signer_signature_hash: Type.String(), - sig: Type.String(), + signature: Type.String(), + metadata: Type.Object({ + server_version: Type.String(), + }), }), }); export type StacksSignerMessageBlockResponseAccepted = Static< @@ -52,7 +55,17 @@ export const StacksSignerMessageBlockResponseRejectedSchema = Type.Object({ data: Type.Object({ reason: Type.String(), reason_code: Type.Union([ - Type.Literal('VALIDATION_FAILED'), + Type.Object({ + VALIDATION_FAILED: Type.Union([ + Type.Literal('BAD_BLOCK_HASH'), + Type.Literal('BAD_TRANSACTION'), + Type.Literal('INVALID_BLOCK'), + Type.Literal('CHAINSTATE_ERROR'), + Type.Literal('UNKNOWN_PARENT'), + Type.Literal('NON_CANONICAL_TENURE'), + Type.Literal('NO_SUCH_TENURE'), + ]), + }), 
Type.Literal('CONNECTIVITY_ISSUES'), Type.Literal('REJECTED_IN_PRIOR_ROUND'), Type.Literal('NO_SORTITION_VIEW'), @@ -62,6 +75,9 @@ export const StacksSignerMessageBlockResponseRejectedSchema = Type.Object({ signer_signature_hash: Type.String(), chain_id: Type.Integer(), signature: Type.String(), + metadata: Type.Object({ + server_version: Type.String(), + }), }), }); export type StacksSignerMessageBlockResponseRejected = Static< From d12acd9c99ce0d0721dfcb853e5cb236e45925b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 25 Oct 2024 10:45:48 -0600 Subject: [PATCH 08/25] feat: store signer messages in local sqlite database (#664) Creates a local sqlite DB for signer messages (`stacks_signers.sqlite`) and stores all messages in it so we can later retrieve them via predicate scans. --- Cargo.lock | 2 + components/chainhook-cli/Cargo.toml | 2 + components/chainhook-cli/src/cli/mod.rs | 9 +- components/chainhook-cli/src/scan/stacks.rs | 29 +- components/chainhook-cli/src/service/mod.rs | 81 +-- .../chainhook-cli/src/service/runloops.rs | 7 +- components/chainhook-cli/src/storage/mod.rs | 51 +- .../chainhook-cli/src/storage/signers.rs | 498 ++++++++++++++++++ .../chainhook-cli/src/storage/sqlite.rs | 87 +++ .../src/chainhooks/stacks/mod.rs | 5 +- .../chainhook-sdk/src/indexer/stacks/mod.rs | 76 +-- .../chainhook-sdk/src/indexer/stacks/tests.rs | 12 +- components/chainhook-sdk/src/observer/mod.rs | 6 +- 13 files changed, 760 insertions(+), 105 deletions(-) create mode 100644 components/chainhook-cli/src/storage/signers.rs create mode 100644 components/chainhook-cli/src/storage/sqlite.rs diff --git a/Cargo.lock b/Cargo.lock index 989eeb269..ae93e54c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -496,11 +496,13 @@ dependencies = [ "rocket", "rocket_okapi", "rocksdb", + "rusqlite", "serde", "serde-redis", "serde_derive", "serde_json", "serial_test", + "slog", "tar", "test-case", "threadpool", diff --git a/components/chainhook-cli/Cargo.toml 
b/components/chainhook-cli/Cargo.toml index ba1b4e07f..d29ed21a3 100644 --- a/components/chainhook-cli/Cargo.toml +++ b/components/chainhook-cli/Cargo.toml @@ -31,6 +31,8 @@ reqwest = { version = "0.12", default-features = false, features = [ "rustls-tls", ] } tokio = { version = "1.38.1", features = ["full"] } +rusqlite = { version = "0.31.0", features = ["bundled"] } +slog = { version = "2.7.0" } futures-util = "0.3.24" flate2 = "1.0.24" tar = "0.4.38" diff --git a/components/chainhook-cli/src/cli/mod.rs b/components/chainhook-cli/src/cli/mod.rs index cc0be7bb5..219950f36 100644 --- a/components/chainhook-cli/src/cli/mod.rs +++ b/components/chainhook-cli/src/cli/mod.rs @@ -11,8 +11,8 @@ use crate::storage::{ delete_confirmed_entry_from_stacks_blocks, delete_unconfirmed_entry_from_stacks_blocks, get_last_block_height_inserted, get_last_unconfirmed_block_height_inserted, get_stacks_block_at_block_height, insert_unconfirmed_entry_in_stacks_blocks, - is_stacks_block_present, open_readonly_stacks_db_conn, open_readonly_stacks_db_conn_with_retry, - open_readwrite_stacks_db_conn, set_last_confirmed_insert_key, + is_stacks_block_present, open_readonly_stacks_db_conn, open_readwrite_stacks_db_conn, + set_last_confirmed_insert_key, StacksDbConnections, }; use chainhook_sdk::chainhooks::bitcoin::BitcoinChainhookSpecification; use chainhook_sdk::chainhooks::bitcoin::BitcoinChainhookSpecificationNetworkMap; @@ -547,15 +547,14 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> { ) .await; // Refresh DB connection so it picks up recent changes made by TSV consolidation. 
- let new_conn = open_readonly_stacks_db_conn_with_retry( + let mut db_conns = StacksDbConnections::open_readonly( &config.expected_cache_path(), - 5, &ctx, )?; scan_stacks_chainstate_via_rocksdb_using_predicate( &predicate_spec, None, - &new_conn, + &mut db_conns, &config, None, &ctx, diff --git a/components/chainhook-cli/src/scan/stacks.rs b/components/chainhook-cli/src/scan/stacks.rs index 9e021513e..77d74dad5 100644 --- a/components/chainhook-cli/src/scan/stacks.rs +++ b/components/chainhook-cli/src/scan/stacks.rs @@ -17,14 +17,18 @@ use crate::{ get_last_block_height_inserted, get_last_unconfirmed_block_height_inserted, get_stacks_block_at_block_height, insert_entry_in_stacks_blocks, is_stacks_block_present, open_readonly_stacks_db_conn_with_retry, open_readwrite_stacks_db_conn, + signers::get_signer_db_messages_received_at_block, StacksDbConnections, }, }; -use chainhook_sdk::types::{BlockIdentifier, Chain}; use chainhook_sdk::{ chainhooks::stacks::evaluate_stacks_chainhook_on_blocks, indexer::{self, stacks::standardize_stacks_serialized_block_header, Indexer}, utils::Context, }; +use chainhook_sdk::{ + chainhooks::stacks::evaluate_stacks_predicate_on_non_consensus_events, + types::{BlockIdentifier, Chain}, +}; use chainhook_sdk::{ chainhooks::stacks::{ handle_stacks_hook_action, StacksChainhookInstance, StacksChainhookOccurrence, @@ -32,7 +36,6 @@ use chainhook_sdk::{ }, utils::{file_append, send_request, AbstractStacksBlock}, }; -use rocksdb::DB; use super::common::PredicateScanResult; @@ -180,11 +183,12 @@ pub async fn get_canonical_fork_from_tsv( pub async fn scan_stacks_chainstate_via_rocksdb_using_predicate( predicate_spec: &StacksChainhookInstance, unfinished_scan_data: Option, - stacks_db_conn: &DB, + db_conns: &mut StacksDbConnections, config: &Config, kill_signal: Option>>, ctx: &Context, ) -> Result { + let stacks_db_conn = &db_conns.stacks_db; let predicate_uuid = &predicate_spec.uuid; let mut chain_tip = match 
get_last_unconfirmed_block_height_inserted(stacks_db_conn, ctx) { Some(chain_tip) => chain_tip, @@ -327,11 +331,20 @@ pub async fn scan_stacks_chainstate_via_rocksdb_using_predicate( last_block_scanned = block_data.block_identifier.clone(); let blocks: Vec<&dyn AbstractStacksBlock> = vec![&block_data]; - let (hits_per_blocks, _predicates_expired) = evaluate_stacks_chainhook_on_blocks(blocks, predicate_spec, ctx); - if hits_per_blocks.is_empty() { + let events = get_signer_db_messages_received_at_block( + &mut db_conns.signers_db, + &block_data.block_identifier, + )?; + let (hits_per_events, _) = evaluate_stacks_predicate_on_non_consensus_events( + &events, + predicate_spec, + ctx, + ); + + if hits_per_blocks.is_empty() && hits_per_events.is_empty() { continue; } @@ -339,8 +352,7 @@ pub async fn scan_stacks_chainstate_via_rocksdb_using_predicate( chainhook: predicate_spec, apply: hits_per_blocks, rollback: vec![], - // TODO(rafaelcr): Query for non consensus events which fall between block timestamps to fill in here - events: vec![] + events: hits_per_events, }; let res = match handle_stacks_hook_action( trigger, @@ -536,7 +548,7 @@ pub async fn scan_stacks_chainstate_via_csv_using_predicate( apply: hits_per_blocks, rollback: vec![], // TODO(rafaelcr): Consider StackerDB chunks that come from TSVs. 
- events: vec![] + events: vec![], }; match handle_stacks_hook_action(trigger, &proofs, &config.get_event_observer_config(), ctx) { @@ -646,6 +658,7 @@ pub async fn consolidate_local_stacks_chainstate_using_csv( } }; + // TODO(rafaelcr): Store signer messages insert_entry_in_stacks_blocks(&block_data, &stacks_db_rw, ctx)?; if blocks_inserted % 2500 == 0 { diff --git a/components/chainhook-cli/src/service/mod.rs b/components/chainhook-cli/src/service/mod.rs index 7e50be6d3..ea22ef976 100644 --- a/components/chainhook-cli/src/service/mod.rs +++ b/components/chainhook-cli/src/service/mod.rs @@ -5,6 +5,7 @@ use crate::config::{Config, PredicatesApi, PredicatesApiConfig}; use crate::scan::stacks::consolidate_local_stacks_chainstate_using_csv; use crate::service::http_api::{load_predicates_from_redis, start_predicate_api_server}; use crate::service::runloops::{start_bitcoin_scan_runloop, start_stacks_scan_runloop}; +use crate::storage::signers::{initialize_signers_db, store_signer_db_messages}; use crate::storage::{ confirm_entries_in_stacks_blocks, draft_entries_in_stacks_blocks, get_all_unconfirmed_blocks, get_last_block_height_inserted, open_readonly_stacks_db_conn_with_retry, @@ -19,6 +20,7 @@ use chainhook_sdk::observer::{ PredicateDeregisteredEvent, PredicateEvaluationReport, PredicateInterruptedData, StacksObserverStartupContext, }; +use chainhook_sdk::{try_error, try_info}; use chainhook_sdk::types::{Chain, StacksBlockData, StacksChainEvent}; use chainhook_sdk::utils::Context; use redis::{Commands, Connection}; @@ -152,10 +154,12 @@ impl Service { } } + initialize_signers_db(&self.config.expected_cache_path(), &self.ctx) + .map_err(|e| format!("unable to initialize signers db: {e}"))?; + let (observer_command_tx, observer_command_rx) = observer_commands_tx_rx.unwrap_or(channel()); let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded(); - // let (ordinal_indexer_command_tx, ordinal_indexer_command_rx) = channel(); let mut event_observer_config 
= self.config.get_event_observer_config(); event_observer_config.registered_chainhooks = chainhook_store; @@ -441,12 +445,14 @@ impl Service { data, ) => { for confirmed_block in &data.confirmed_blocks { - if let Some(expired_predicate_uuids) = expire_predicates_for_block( - &Chain::Bitcoin, - confirmed_block.block_identifier.index, - &mut predicates_db_conn, - &ctx, - ) { + if let Some(expired_predicate_uuids) = + expire_predicates_for_block( + &Chain::Bitcoin, + confirmed_block.block_identifier.index, + &mut predicates_db_conn, + &ctx, + ) + { for uuid in expired_predicate_uuids.into_iter() { let _ = observer_command_tx.send( ObserverCommand::ExpireBitcoinPredicate( @@ -466,12 +472,14 @@ impl Service { data, ) => { for confirmed_block in &data.confirmed_blocks { - if let Some(expired_predicate_uuids) = expire_predicates_for_block( - &Chain::Bitcoin, - confirmed_block.block_identifier.index, - &mut predicates_db_conn, - &ctx, - ) { + if let Some(expired_predicate_uuids) = + expire_predicates_for_block( + &Chain::Bitcoin, + confirmed_block.block_identifier.index, + &mut predicates_db_conn, + &ctx, + ) + { for uuid in expired_predicate_uuids.into_iter() { let _ = observer_command_tx.send( ObserverCommand::ExpireBitcoinPredicate( @@ -547,10 +555,16 @@ impl Service { }; } StacksChainEvent::ChainUpdatedWithMicroblocks(_) - | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, + | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {} StacksChainEvent::ChainUpdatedWithNonConsensusEvents(data) => { - // TODO(rafaelcr): Store signer data. 
- println!("signer message: {:?}", data); + if let Err(e) = store_signer_db_messages( + &self.config.expected_cache_path(), + &data.events, + &self.ctx, + ) { + try_error!(self.ctx, "unable to store signer messages: {e}"); + }; + try_info!(self.ctx, "Stored {} stacks non-consensus events", data.events.len()); } }, Err(e) => { @@ -574,12 +588,14 @@ impl Service { StacksChainEvent::ChainUpdatedWithBlocks(data) => { stacks_event += 1; for confirmed_block in &data.confirmed_blocks { - if let Some(expired_predicate_uuids) = expire_predicates_for_block( - &Chain::Stacks, - confirmed_block.block_identifier.index, - &mut predicates_db_conn, - &ctx, - ) { + if let Some(expired_predicate_uuids) = + expire_predicates_for_block( + &Chain::Stacks, + confirmed_block.block_identifier.index, + &mut predicates_db_conn, + &ctx, + ) + { for uuid in expired_predicate_uuids.into_iter() { let _ = observer_command_tx.send( ObserverCommand::ExpireStacksPredicate( @@ -597,12 +613,14 @@ impl Service { } StacksChainEvent::ChainUpdatedWithReorg(data) => { for confirmed_block in &data.confirmed_blocks { - if let Some(expired_predicate_uuids) = expire_predicates_for_block( - &Chain::Stacks, - confirmed_block.block_identifier.index, - &mut predicates_db_conn, - &ctx, - ) { + if let Some(expired_predicate_uuids) = + expire_predicates_for_block( + &Chain::Stacks, + confirmed_block.block_identifier.index, + &mut predicates_db_conn, + &ctx, + ) + { for uuid in expired_predicate_uuids.into_iter() { let _ = observer_command_tx.send( ObserverCommand::ExpireStacksPredicate( @@ -619,10 +637,10 @@ impl Service { } } StacksChainEvent::ChainUpdatedWithMicroblocks(_) - | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {}, + | StacksChainEvent::ChainUpdatedWithMicroblocksReorg(_) => {} StacksChainEvent::ChainUpdatedWithNonConsensusEvents(_) => { // TODO(rafaelcr): Expire signer message predicates when appropriate - }, + } }; update_status_from_report( Chain::Stacks, @@ -640,7 +658,8 @@ impl Service { 
&mut self.config, &self.ctx, ) - .await { + .await + { error!( self.ctx.expect_logger(), "Failed to update database from archive: {e}" diff --git a/components/chainhook-cli/src/service/runloops.rs b/components/chainhook-cli/src/service/runloops.rs index c271b60e0..c177b5636 100644 --- a/components/chainhook-cli/src/service/runloops.rs +++ b/components/chainhook-cli/src/service/runloops.rs @@ -19,8 +19,7 @@ use crate::{ bitcoin::scan_bitcoin_chainstate_via_rpc_using_predicate, common::PredicateScanResult, stacks::scan_stacks_chainstate_via_rocksdb_using_predicate, }, - service::{open_readwrite_predicates_db_conn_or_panic, set_predicate_interrupted_status}, - storage::open_readonly_stacks_db_conn, + service::{open_readwrite_predicates_db_conn_or_panic, set_predicate_interrupted_status}, storage::StacksDbConnections, }; use super::ScanningData; @@ -54,7 +53,7 @@ pub fn start_stacks_scan_runloop( let kill_signal = Arc::new(RwLock::new(false)); kill_signals.insert(predicate_spec.uuid.clone(), kill_signal.clone()); stacks_scan_pool.execute(move || { - let stacks_db_conn = match open_readonly_stacks_db_conn( + let mut db_conns = match StacksDbConnections::open_readonly( &moved_config.expected_cache_path(), &moved_ctx, ) { @@ -75,7 +74,7 @@ pub fn start_stacks_scan_runloop( let op = scan_stacks_chainstate_via_rocksdb_using_predicate( &predicate_spec, unfinished_scan_data, - &stacks_db_conn, + &mut db_conns, &moved_config, Some(kill_signal), &moved_ctx, diff --git a/components/chainhook-cli/src/storage/mod.rs b/components/chainhook-cli/src/storage/mod.rs index 55fa9e8f9..25a48e6af 100644 --- a/components/chainhook-cli/src/storage/mod.rs +++ b/components/chainhook-cli/src/storage/mod.rs @@ -1,9 +1,14 @@ +pub mod signers; +pub mod sqlite; + use std::collections::VecDeque; use std::path::PathBuf; use chainhook_sdk::types::{BlockIdentifier, StacksBlockData, StacksBlockUpdate}; use chainhook_sdk::utils::Context; use rocksdb::{Options, DB}; +use rusqlite::Connection; +use 
signers::open_readonly_signers_db_conn; const UNCONFIRMED_KEY_PREFIX: &[u8; 2] = b"~:"; const CONFIRMED_KEY_PREFIX: &[u8; 2] = b"b:"; @@ -11,6 +16,24 @@ const KEY_SUFFIX: &[u8; 2] = b":d"; const LAST_UNCONFIRMED_KEY_PREFIX: &[u8; 3] = b"m:~"; const LAST_CONFIRMED_KEY_PREFIX: &[u8; 3] = b"m:t"; +/// Keeps references to all databases used to monitor Stacks transactions and events. +// TODO(rafaelcr): Expand this struct to be flexible enough to include Bitcoin DBs and/or turn some DBs on/off. +pub struct StacksDbConnections { + pub stacks_db: DB, + // TODO(rafaelcr): Make this optional if we're not interested in signer data. + pub signers_db: Connection, +} + +impl StacksDbConnections { + /// Opens all connections in read-only mode. + pub fn open_readonly(base_dir: &PathBuf, ctx: &Context) -> Result { + Ok(StacksDbConnections { + stacks_db: open_readonly_stacks_db_conn(base_dir, ctx)?, + signers_db: open_readonly_signers_db_conn(base_dir, ctx)?, + }) + } +} + fn get_db_default_options() -> Options { let mut opts = Options::default(); opts.create_if_missing(true); @@ -70,9 +93,8 @@ pub fn open_readonly_stacks_db_conn(base_dir: &PathBuf, ctx: &Context) -> Result { match open_readwrite_stacks_db_conn(base_dir, ctx) { Ok(_) => { - let db = DB::open_for_read_only(&opts, path, false).map_err(|e| { - format!("unable to open stacks.rocksdb: {}", e) - })?; + let db = DB::open_for_read_only(&opts, path, false) + .map_err(|e| format!("unable to open stacks.rocksdb: {}", e))?; Ok(db) } Err(e) => Err(e), @@ -87,8 +109,7 @@ pub fn open_readonly_stacks_db_conn(base_dir: &PathBuf, ctx: &Context) -> Result pub fn open_readwrite_stacks_db_conn(base_dir: &PathBuf, _ctx: &Context) -> Result { let path = get_default_stacks_db_file_path(base_dir); let opts = get_db_default_options(); - let db = DB::open(&opts, path) - .map_err(|e| format!("unable to open stacks.rocksdb: {}", e))?; + let db = DB::open(&opts, path).map_err(|e| format!("unable to open stacks.rocksdb: {}", e))?; Ok(db) } @@ 
-195,9 +216,12 @@ pub fn delete_confirmed_entry_from_stacks_blocks( pub fn get_last_unconfirmed_block_height_inserted(stacks_db: &DB, _ctx: &Context) -> Option { stacks_db .get(get_last_unconfirmed_insert_key()) - .unwrap_or(None).map(|bytes| u64::from_be_bytes([ + .unwrap_or(None) + .map(|bytes| { + u64::from_be_bytes([ bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], - ])) + ]) + }) } pub fn get_all_unconfirmed_blocks( @@ -226,9 +250,12 @@ pub fn get_all_unconfirmed_blocks( pub fn get_last_block_height_inserted(stacks_db: &DB, _ctx: &Context) -> Option { stacks_db .get(get_last_confirmed_insert_key()) - .unwrap_or(None).map(|bytes| u64::from_be_bytes([ + .unwrap_or(None) + .map(|bytes| { + u64::from_be_bytes([ bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], - ])) + ]) + }) } pub fn confirm_entries_in_stacks_blocks( @@ -273,10 +300,8 @@ pub fn get_stacks_block_at_block_height( }) { Ok(Some(entry)) => { return Ok(Some({ - let spec: StacksBlockData = - serde_json::from_slice(&entry[..]).map_err(|e| { - format!("unable to deserialize Stacks block {}", e) - })?; + let spec: StacksBlockData = serde_json::from_slice(&entry[..]) + .map_err(|e| format!("unable to deserialize Stacks block {}", e))?; spec })) } diff --git a/components/chainhook-cli/src/storage/signers.rs b/components/chainhook-cli/src/storage/signers.rs new file mode 100644 index 000000000..40a65321c --- /dev/null +++ b/components/chainhook-cli/src/storage/signers.rs @@ -0,0 +1,498 @@ +use std::path::PathBuf; + +use chainhook_sdk::{ + try_info, + types::{ + BlockAcceptedResponse, BlockIdentifier, BlockProposalData, BlockPushedData, + BlockRejectReasonCode, BlockRejectedResponse, BlockResponseData, BlockValidationFailedCode, + NakamotoBlockData, NakamotoBlockHeaderData, SignerMessageMetadata, + StacksNonConsensusEventData, StacksNonConsensusEventPayloadData, StacksSignerMessage, + StacksStackerDbChunk, + }, + utils::Context, +}; +use 
rusqlite::Connection; + +use super::sqlite::{create_or_open_readwrite_db, open_existing_readonly_db}; + +fn get_default_signers_db_file_path(base_dir: &PathBuf) -> PathBuf { + let mut destination_path = base_dir.clone(); + destination_path.push("stacks_signers.sqlite"); + destination_path +} + +pub fn open_readonly_signers_db_conn( + base_dir: &PathBuf, + ctx: &Context, +) -> Result { + let path = get_default_signers_db_file_path(&base_dir); + let conn = open_existing_readonly_db(&path, ctx)?; + Ok(conn) +} + +pub fn initialize_signers_db(base_dir: &PathBuf, ctx: &Context) -> Result { + let conn = create_or_open_readwrite_db(Some(&get_default_signers_db_file_path(base_dir)), ctx)?; + + // Stores message headers + conn.execute( + "CREATE TABLE IF NOT EXISTS messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pubkey TEXT NOT NULL, + contract TEXT NOT NULL, + sig TEXT NOT NULL, + received_at_ms INTEGER NOT NULL, + received_at_block_height INTEGER NOT NULL, + received_at_index_block_hash INTEGER NOT NULL, + type TEXT NOT NULL + )", + [], + ) + .map_err(|e| format!("unable to create table: {e}"))?; + conn.execute( + "CREATE INDEX IF NOT EXISTS index_messages_on_received_at ON messages(received_at_ms, received_at_block_height)", + [] + ).map_err(|e| format!("unable to create index: {e}"))?; + conn.execute( + "CREATE INDEX IF NOT EXISTS index_messages_on_pubkey ON messages(pubkey)", + [], + ) + .map_err(|e| format!("unable to create index: {e}"))?; + + // Stores both `BlockProposal` and `BlockPushed` messages. 
+ conn.execute( + "CREATE TABLE IF NOT EXISTS blocks ( + message_id INTEGER NOT NULL, + proposed BOOLEAN NOT NULL, + version INTEGER NOT NULL, + chain_length INTEGER NOT NULL, + burn_spent INTEGER NOT NULL, + consensus_hash TEXT NOT NULL, + parent_block_id TEXT NOT NULL, + tx_merkle_root TEXT NOT NULL, + state_index_root TEXT NOT NULL, + timestamp INTEGER NOT NULL, + miner_signature TEXT NOT NULL, + signer_signature TEXT NOT NULL, + pox_treatment TEXT NOT NULL, + block_hash TEXT NOT NULL, + index_block_hash TEXT NOT NULL, + proposal_burn_height INTEGER, + proposal_reward_cycle INTEGER, + UNIQUE(message_id), + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE + )", + [], + ) + .map_err(|e| format!("unable to create table: {e}"))?; + + // Stores `BlockResponse` messages. + conn.execute( + "CREATE TABLE IF NOT EXISTS block_responses ( + message_id INTEGER NOT NULL, + accepted BOOLEAN NOT NULL, + signer_signature_hash TEXT NOT NULL, + signature TEXT NOT NULL, + server_version TEXT NOT NULL, + rejected_reason TEXT, + rejected_reason_code TEXT, + rejected_validation_failed_code TEXT, + rejected_chain_id INTEGER, + UNIQUE(message_id), + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE + )", + [], + ) + .map_err(|e| format!("unable to create table: {e}"))?; + + Ok(conn) +} + +pub fn store_signer_db_messages( + base_dir: &PathBuf, + events: &Vec, + ctx: &Context, +) -> Result<(), String> { + use chainhook_sdk::types::{StacksNonConsensusEventPayloadData, StacksSignerMessage}; + + if events.len() == 0 { + return Ok(()); + } + let mut conn = + create_or_open_readwrite_db(Some(&get_default_signers_db_file_path(base_dir)), ctx)?; + let db_tx = conn + .transaction() + .map_err(|e| format!("unable to open db transaction: {e}"))?; + { + let mut message_stmt = db_tx + .prepare_cached( + "INSERT INTO messages + (pubkey, contract, sig, received_at_ms, received_at_block_height, received_at_index_block_hash, type) + VALUES (?,?,?,?,?,?,?) 
+ RETURNING id", + ) + .map_err(|e| format!("unable to prepare statement: {e}"))?; + for event in events.iter() { + match &event.payload { + StacksNonConsensusEventPayloadData::SignerMessage(chunk) => { + // Write message header. + let type_str = match chunk.message { + StacksSignerMessage::BlockProposal(_) => "block_proposal", + StacksSignerMessage::BlockResponse(_) => "block_response", + StacksSignerMessage::BlockPushed(_) => "block_pushed", + }; + let message_id: u64 = message_stmt + .query(rusqlite::params![ + &chunk.pubkey, + &chunk.contract, + &chunk.sig, + &event.received_at_ms, + &event.received_at_block.index, + &event.received_at_block.hash, + &type_str, + ]) + .map_err(|e| format!("unable to write message: {e}"))? + .next() + .map_err(|e| format!("unable to retrieve new message id: {e}"))? + .ok_or("message id is empty")? + .get(0) + .map_err(|e| format!("unable to convert message id: {e}"))?; + + // Write payload specifics. + match &chunk.message { + StacksSignerMessage::BlockProposal(data) => { + try_info!( + ctx, + "Storing stacks BlockProposal by signer {}", + chunk.pubkey + ); + let mut stmt = db_tx + .prepare("INSERT INTO blocks + (message_id, proposed, version, chain_length, burn_spent, consensus_hash, parent_block_id, + tx_merkle_root, state_index_root, timestamp, miner_signature, signer_signature, pox_treatment, + block_hash, index_block_hash, proposal_burn_height, proposal_reward_cycle) + VALUES (?,TRUE,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + .map_err(|e| format!("unable to prepare statement: {e}"))?; + stmt.execute(rusqlite::params![ + &message_id, + &data.block.header.version, + &data.block.header.chain_length, + &data.block.header.burn_spent, + &data.block.header.consensus_hash, + &data.block.header.parent_block_id, + &data.block.header.tx_merkle_root, + &data.block.header.state_index_root, + &data.block.header.timestamp, + &data.block.header.miner_signature, + &data.block.header.signer_signature.join(","), + &data.block.header.pox_treatment, + 
&data.block.block_hash, + &data.block.index_block_hash, + &data.burn_height, + &data.reward_cycle, + ]) + .map_err(|e| format!("unable to write block proposal: {e}"))?; + } + StacksSignerMessage::BlockPushed(data) => { + try_info!(ctx, "Storing stacks BlockPushed by signer {}", chunk.pubkey); + let mut stmt = db_tx + .prepare("INSERT INTO blocks + (message_id, proposed, version, chain_length, burn_spent, consensus_hash, parent_block_id, + tx_merkle_root, state_index_root, timestamp, miner_signature, signer_signature, pox_treatment, + block_hash, index_block_hash) + VALUES (?,FALSE,?,?,?,?,?,?,?,?,?,?,?,?,?)") + .map_err(|e| format!("unable to prepare statement: {e}"))?; + stmt.execute(rusqlite::params![ + &message_id, + &data.block.header.version, + &data.block.header.chain_length, + &data.block.header.burn_spent, + &data.block.header.consensus_hash, + &data.block.header.parent_block_id, + &data.block.header.tx_merkle_root, + &data.block.header.state_index_root, + &data.block.header.timestamp, + &data.block.header.miner_signature, + &data.block.header.signer_signature.join(","), + &data.block.header.pox_treatment, + &data.block.block_hash, + &data.block.index_block_hash, + ]) + .map_err(|e| format!("unable to write block pushed: {e}"))?; + } + StacksSignerMessage::BlockResponse(data) => { + match data { + BlockResponseData::Accepted(response) => { + try_info!( + ctx, + "Storing stacks BlockResponse (Accepted) by signer {}", + chunk.pubkey + ); + let mut stmt = db_tx + .prepare( + "INSERT INTO block_responses + (message_id, accepted, signer_signature_hash, signature, server_version) + VALUES (?,TRUE,?,?,?)", + ) + .map_err(|e| format!("unable to prepare statement: {e}"))?; + stmt.execute(rusqlite::params![ + &message_id, + &response.signer_signature_hash, + &response.signature, + &response.metadata.server_version, + ]) + .map_err(|e| format!("unable to write block pushed: {e}"))?; + } + BlockResponseData::Rejected(response) => { + try_info!( + ctx, + "Storing stacks 
BlockResponse (Rejected) by signer {}", + chunk.pubkey + ); + let mut validation_code: Option<&str> = None; + let reason_code = match &response.reason_code { + BlockRejectReasonCode::ValidationFailed { + validation_failed, + } => { + validation_code = match validation_failed { + BlockValidationFailedCode::BadBlockHash => { + Some("bad_block_hash") + } + BlockValidationFailedCode::BadTransaction => { + Some("bad_transaction") + } + BlockValidationFailedCode::InvalidBlock => { + Some("invalid_block") + } + BlockValidationFailedCode::ChainstateError => { + Some("chainstate_error") + } + BlockValidationFailedCode::UnknownParent => { + Some("unknown_parent") + } + BlockValidationFailedCode::NonCanonicalTenure => { + Some("no_canonical_tenure") + } + BlockValidationFailedCode::NoSuchTenure => { + Some("no_such_tenure") + } + }; + "validation_failed" + } + BlockRejectReasonCode::ConnectivityIssues => { + "connectivity_issues" + } + BlockRejectReasonCode::RejectedInPriorRound => { + "rejected_in_prior_round" + } + BlockRejectReasonCode::NoSortitionView => { + "no_sortition_view" + } + BlockRejectReasonCode::SortitionViewMismatch => { + "sortition_view_mismatch" + } + BlockRejectReasonCode::TestingDirective => { + "testing_directive" + } + }; + let mut stmt = db_tx + .prepare("INSERT INTO block_responses + (message_id, accepted, signer_signature_hash, signature, server_version, rejected_reason, + rejected_reason_code, rejected_validation_failed_code, rejected_chain_id) + VALUES (?,FALSE,?,?,?,?,?,?,?)") + .map_err(|e| format!("unable to prepare statement: {e}"))?; + stmt.execute(rusqlite::params![ + &message_id, + &response.signer_signature_hash, + &response.signature, + &response.metadata.server_version, + &response.reason, + &reason_code, + &validation_code, + &response.chain_id, + ]) + .map_err(|e| format!("unable to write block pushed: {e}"))?; + } + }; + } + } + } + } + } + } + db_tx + .commit() + .map_err(|e| format!("unable to commit db transaction: {e}"))?; + Ok(()) 
+} + +fn event_data_from_message_row( + pubkey: String, + contract: String, + sig: String, + received_at_ms: u64, + received_at_block_height: u64, + received_at_index_block_hash: String, + message: StacksSignerMessage, +) -> StacksNonConsensusEventData { + StacksNonConsensusEventData { + payload: StacksNonConsensusEventPayloadData::SignerMessage(StacksStackerDbChunk { + contract, + sig, + pubkey, + message, + }), + received_at_ms, + received_at_block: BlockIdentifier { + index: received_at_block_height, + hash: received_at_index_block_hash, + }, + } +} + +pub fn get_signer_db_messages_received_at_block( + db_conn: &mut Connection, + block_identifier: &BlockIdentifier, +) -> Result, String> { + let mut events = vec![]; + let db_tx = db_conn + .transaction() + .map_err(|e| format!("unable to open db transaction: {e}"))?; + { + let mut messages_stmt = db_tx + .prepare( + "SELECT id, pubkey, contract, sig, received_at_ms, received_at_block_height, received_at_index_block_hash, + type + FROM messages + WHERE received_at_block_height = ? + ORDER BY id ASC", + ) + .map_err(|e| format!("unable to prepare query: {e}"))?; + let mut messages_iter = messages_stmt + .query(rusqlite::params![&block_identifier.index]) + .map_err(|e| format!("unable to query messages: {e}"))?; + while let Some(row) = messages_iter + .next() + .map_err(|e| format!("row error: {e}"))? 
+ { + let message_id: u64 = row.get(0).unwrap(); + let pubkey: String = row.get(1).unwrap(); + let contract: String = row.get(2).unwrap(); + let sig: String = row.get(3).unwrap(); + let received_at_ms: u64 = row.get(4).unwrap(); + let received_at_block_height: u64 = row.get(5).unwrap(); + let received_at_index_block_hash: String = row.get(6).unwrap(); + let type_str: String = row.get(7).unwrap(); + let message = match type_str.as_str() { + "block_proposal" + | "block_pushed" => db_tx + .query_row( + "SELECT version, chain_length, burn_spent, consensus_hash, parent_block_id, tx_merkle_root, + state_index_root, timestamp, miner_signature, signer_signature, pox_treatment, block_hash, + index_block_hash, proposal_burn_height, proposal_reward_cycle + FROM blocks + WHERE message_id = ?", + rusqlite::params![&message_id], + |block_row| { + let signer_signature_str: String = block_row.get(9).unwrap(); + let header = NakamotoBlockHeaderData { + version: block_row.get(0).unwrap(), + chain_length: block_row.get(1).unwrap(), + burn_spent: block_row.get(2).unwrap(), + consensus_hash: block_row.get(3).unwrap(), + parent_block_id: block_row.get(4).unwrap(), + tx_merkle_root: block_row.get(5).unwrap(), + state_index_root: block_row.get(6).unwrap(), + timestamp: block_row.get(7).unwrap(), + miner_signature: block_row.get(8).unwrap(), + signer_signature: signer_signature_str.split(",").map(String::from).collect(), + pox_treatment: block_row.get(10).unwrap(), + }; + let block = NakamotoBlockData { + header, + block_hash: block_row.get(11).unwrap(), + index_block_hash: block_row.get(12).unwrap(), + transactions: vec![], + }; + if type_str == "block_proposal" { + Ok(StacksSignerMessage::BlockProposal(BlockProposalData { + block, + burn_height: block_row.get(13).unwrap(), + reward_cycle: block_row.get(14).unwrap(), + })) + } else { + Ok(StacksSignerMessage::BlockPushed(BlockPushedData { block })) + } + }, + ) + .map_err(|e| format!("unable to query block proposal: {e}"))?, + 
"block_response" => db_tx + .query_row( + "SELECT accepted, signer_signature_hash, signature, server_version, rejected_reason, + rejected_reason_code, rejected_validation_failed_code, rejected_chain_id + FROM block_responses + WHERE message_id = ?", + rusqlite::params![&message_id], + |response_row| { + let accepted: bool = response_row.get(0).unwrap(); + let signer_signature_hash: String = response_row.get(1).unwrap(); + let signature: String = response_row.get(2).unwrap(); + let metadata = SignerMessageMetadata { + server_version: response_row.get(3).unwrap() + }; + if accepted { + Ok(StacksSignerMessage::BlockResponse(BlockResponseData::Accepted(BlockAcceptedResponse { + signer_signature_hash, + signature, + metadata, + }))) + } else { + let rejected_reason_code: String = response_row.get(5).unwrap(); + Ok(StacksSignerMessage::BlockResponse(BlockResponseData::Rejected(BlockRejectedResponse { + signer_signature_hash, + signature, + metadata, + reason: response_row.get(4).unwrap(), + reason_code: match rejected_reason_code.as_str() { + "validation_failed" => { + let validation_code: String = response_row.get(6).unwrap(); + BlockRejectReasonCode::ValidationFailed { + validation_failed: match validation_code.as_str() { + "bad_block_hash" => BlockValidationFailedCode::BadBlockHash, + "bad_transaction" => BlockValidationFailedCode::BadTransaction, + "invalid_block" => BlockValidationFailedCode::InvalidBlock, + "chainstate_error" => BlockValidationFailedCode::ChainstateError, + "unknown_parent" => BlockValidationFailedCode::UnknownParent, + "no_canonical_tenure" => BlockValidationFailedCode::NonCanonicalTenure, + "no_such_tenure" => BlockValidationFailedCode::NoSuchTenure, + _ => unreachable!(), + } + } + }, + "connectivity_issues" => BlockRejectReasonCode::ConnectivityIssues, + "rejected_in_prior_round" => BlockRejectReasonCode::RejectedInPriorRound, + "no_sortition_view" => BlockRejectReasonCode::NoSortitionView, + "sortition_view_mismatch" => 
BlockRejectReasonCode::SortitionViewMismatch, + "testing_directive" => BlockRejectReasonCode::TestingDirective, + _ => unreachable!(), + }, + chain_id: response_row.get(7).unwrap(), + }))) + } + }, + ) + .map_err(|e| format!("unable to query block response: {e}"))?, + _ => return Err(format!("invalid message type: {type_str}")), + }; + events.push(event_data_from_message_row( + pubkey, + contract, + sig, + received_at_ms, + received_at_block_height, + received_at_index_block_hash, + message, + )); + } + } + Ok(events) +} diff --git a/components/chainhook-cli/src/storage/sqlite.rs b/components/chainhook-cli/src/storage/sqlite.rs new file mode 100644 index 000000000..a31df002e --- /dev/null +++ b/components/chainhook-cli/src/storage/sqlite.rs @@ -0,0 +1,87 @@ +use std::path::PathBuf; + +use chainhook_sdk::{try_error, utils::Context}; +use rusqlite::{Connection, OpenFlags}; + +/// Configures the SQLite connection with common settings. +fn connection_with_defaults_pragma(conn: Connection) -> Result { + conn.busy_timeout(std::time::Duration::from_secs(300)) + .map_err(|e| format!("unable to set db timeout: {e}"))?; + conn.pragma_update(None, "mmap_size", 512 * 1024 * 1024) + .map_err(|e| format!("unable to set db mmap_size: {e}"))?; + conn.pragma_update(None, "cache_size", 512 * 1024 * 1024) + .map_err(|e| format!("unable to set db cache_size: {e}"))?; + conn.pragma_update(None, "journal_mode", &"WAL") + .map_err(|e| format!("unable to enable db wal: {e}"))?; + Ok(conn) +} + +pub fn open_existing_readonly_db(db_path: &PathBuf, ctx: &Context) -> Result { + let open_flags = match std::fs::metadata(db_path) { + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + return Err(format!("could not find {}", db_path.display())); + } else { + return Err(format!("could not stat {}", db_path.display())); + } + } + Ok(_md) => { + OpenFlags::SQLITE_OPEN_READ_ONLY + } + }; + let conn = loop { + match Connection::open_with_flags(db_path, open_flags) { + Ok(conn) => break conn, 
+ Err(e) => { + try_error!(ctx, "unable to open hord.rocksdb: {}", e.to_string()); + } + }; + std::thread::sleep(std::time::Duration::from_secs(1)); + }; + Ok(connection_with_defaults_pragma(conn)?) +} + +pub fn create_or_open_readwrite_db( + db_path: Option<&PathBuf>, + ctx: &Context, +) -> Result { + let open_flags = if let Some(db_path) = db_path { + match std::fs::metadata(&db_path) { + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + // Create the directory path that leads to the DB file + if let Some(dirp) = PathBuf::from(&db_path).parent() { + std::fs::create_dir_all(dirp) + .map_err(|e| format!("unable to create db directory path: {e}"))?; + } + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + return Err(format!( + "could not stat db directory {}: {e}", + db_path.display() + )); + } + } + Ok(_) => OpenFlags::SQLITE_OPEN_READ_WRITE, + } + } else { + OpenFlags::SQLITE_OPEN_READ_WRITE + }; + + let path = match db_path { + Some(path) => path.to_str().unwrap(), + None => ":memory:", + }; + let conn = loop { + // Connect with retry. + match Connection::open_with_flags(&path, open_flags) { + Ok(conn) => break conn, + Err(e) => { + try_error!(ctx, "unable to open sqlite db: {e}"); + } + }; + std::thread::sleep(std::time::Duration::from_secs(1)); + }; + + Ok(connection_with_defaults_pragma(conn)?) +} diff --git a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs index a59344ea4..36995f1a4 100644 --- a/components/chainhook-sdk/src/chainhooks/stacks/mod.rs +++ b/components/chainhook-sdk/src/chainhooks/stacks/mod.rs @@ -873,10 +873,7 @@ pub fn evaluate_stacks_predicate_on_non_consensus_events<'a>( | StacksPredicate::NftEvent(_) | StacksPredicate::StxEvent(_) | StacksPredicate::PrintEvent(_) - | StacksPredicate::Txid(_) => { - // Ignore, possibly expected behavior? 
- // https://github.com/hirosystems/chainhook/pull/663#discussion_r1814995429 - }, + | StacksPredicate::Txid(_) => {}, }; } (occurrences, expired_predicates) diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 7ebf90c16..9cc5b8928 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -478,9 +478,7 @@ pub fn standardize_stacks_block( let signer_sig_hash = block .signer_signature_hash .as_ref() - .map(|hash| { - hex::decode(&hash[2..]).expect("unable to decode signer_signature hex") - }); + .map(|hash| hex::decode(&hash[2..]).expect("unable to decode signer_signature hex")); let block = StacksBlockData { block_identifier: BlockIdentifier { @@ -513,16 +511,24 @@ pub fn standardize_stacks_block( signer_signature: block.signer_signature.clone(), signer_public_keys: match (signer_sig_hash, &block.signer_signature) { - (Some(signer_sig_hash), Some(signatures)) => { - Some(signatures.iter().map(|sig_hex| { - let sig_msg = clarity::util::secp256k1::MessageSignature::from_hex(sig_hex) - .map_err(|e| format!("unable to parse signer signature message: {}", e))?; - let pubkey = get_signer_pubkey_from_message_hash(&signer_sig_hash, &sig_msg) - .map_err(|e| format!("unable to recover signer sig pubkey: {}", e))?; - Ok(format!("0x{}", hex::encode(pubkey))) - }) - .collect::, String>>()?) 
- } + (Some(signer_sig_hash), Some(signatures)) => Some( + signatures + .iter() + .map(|sig_hex| { + let sig_msg = + clarity::util::secp256k1::MessageSignature::from_hex(sig_hex) + .map_err(|e| { + format!("unable to parse signer signature message: {}", e) + })?; + let pubkey = + get_signer_pubkey_from_message_hash(&signer_sig_hash, &sig_msg) + .map_err(|e| { + format!("unable to recover signer sig pubkey: {}", e) + })?; + Ok(format!("0x{}", hex::encode(pubkey))) + }) + .collect::, String>>()?, + ), _ => None, }, @@ -676,35 +682,33 @@ pub fn standardize_stacks_microblock_trail( #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_marshalled_stackerdb_chunks( marshalled_stackerdb_chunks: JsonValue, - _ctx: &Context, + ctx: &Context, ) -> Result, String> { let mut stackerdb_chunks: NewStackerDbChunks = serde_json::from_value(marshalled_stackerdb_chunks) .map_err(|e| format!("unable to parse stackerdb chunks {e}"))?; - standardize_stacks_stackerdb_chunks(&mut stackerdb_chunks) + standardize_stacks_stackerdb_chunks(&mut stackerdb_chunks, ctx) } #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_stackerdb_chunks( stackerdb_chunks: &NewStackerDbChunks, + ctx: &Context, ) -> Result, String> { use stacks_codec::codec::BlockResponse; use stacks_codec::codec::RejectCode; use stacks_codec::codec::SignerMessage; use stacks_codec::codec::ValidateRejectCode; + use crate::try_debug; + let contract_id = &stackerdb_chunks.contract_id.name; let mut parsed_chunks: Vec = vec![]; for slot in stackerdb_chunks.modified_slots.iter() { - let data_bytes = match hex::decode(&slot.data) { - Ok(bytes) => bytes, - Err(e) => return Err(format!("unable to decode signer slot hex data: {e}")), - }; - let signer_message = - match SignerMessage::consensus_deserialize(&mut Cursor::new(&data_bytes)) { - Ok(message) => message, - Err(e) => return Err(format!("unable to deserialize SignerMessage: {e}")), - }; + let data_bytes = hex::decode(&slot.data) + .map_err(|e| format!("unable to 
decode signer slot hex data: {e}"))?; + let signer_message = SignerMessage::consensus_deserialize(&mut Cursor::new(&data_bytes)) + .map_err(|e| format!("unable to deserialize SignerMessage: {e}"))?; let message = match signer_message { SignerMessage::BlockProposal(block_proposal) => { StacksSignerMessage::BlockProposal(BlockProposalData { @@ -716,11 +720,14 @@ pub fn standardize_stacks_stackerdb_chunks( SignerMessage::BlockResponse(block_response) => match block_response { BlockResponse::Accepted(block_accepted) => StacksSignerMessage::BlockResponse( BlockResponseData::Accepted(BlockAcceptedResponse { - signer_signature_hash: format!("0x{}", block_accepted.signer_signature_hash.to_hex()), + signer_signature_hash: format!( + "0x{}", + block_accepted.signer_signature_hash.to_hex() + ), signature: format!("0x{}", block_accepted.signature.to_hex()), metadata: SignerMessageMetadata { server_version: block_accepted.metadata.server_version, - } + }, }), ), BlockResponse::Rejected(block_rejection) => StacksSignerMessage::BlockResponse( @@ -783,9 +790,16 @@ pub fn standardize_stacks_stackerdb_chunks( block: standardize_stacks_nakamoto_block(&nakamoto_block)?, }) } - SignerMessage::MockSignature(_) - | SignerMessage::MockProposal(_) - | SignerMessage::MockBlock(_) => { + SignerMessage::MockSignature(_) => { + try_debug!(ctx, "Ignoring MockSignature stacks signer message"); + continue; + } + SignerMessage::MockProposal(_) => { + try_debug!(ctx, "Ignoring MockProposal stacks signer message"); + continue; + } + SignerMessage::MockBlock(_) => { + try_debug!(ctx, "Ignoring MockBlock stacks signer message"); continue; } }; @@ -898,8 +912,8 @@ pub fn get_signer_pubkey_from_message_hash( RecoveryId::from_i32(rec_id as i32).map_err(|e| format!("invalid recovery id: {e}"))?; let signature = RecoverableSignature::from_compact(&sig, recovery_id) .map_err(|e| format!("invalid signature: {e}"))?; - let message = - Message::from_digest_slice(&message_hash).map_err(|e| format!("invalid 
digest message: {e}"))?; + let message = Message::from_digest_slice(&message_hash) + .map_err(|e| format!("invalid digest message: {e}"))?; let pubkey = secp .recover_ecdsa(&message, &signature) diff --git a/components/chainhook-sdk/src/indexer/stacks/tests.rs b/components/chainhook-sdk/src/indexer/stacks/tests.rs index f4a005c97..bf775d59b 100644 --- a/components/chainhook-sdk/src/indexer/stacks/tests.rs +++ b/components/chainhook-sdk/src/indexer/stacks/tests.rs @@ -404,9 +404,12 @@ fn into_chainhook_event_rejects_invalid_missing_event() { fn parses_block_response_signer_message() { use chainhook_types::{BlockResponseData, StacksSignerMessage}; - use crate::indexer::stacks::{ - NewSignerModifiedSlot, NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots, - NewStackerDbChunks, NewStackerDbChunksContractId, + use crate::{ + indexer::stacks::{ + NewSignerModifiedSlot, NewStackerDbChunkIssuerId, NewStackerDbChunkIssuerSlots, + NewStackerDbChunks, NewStackerDbChunksContractId, + }, + utils::Context, }; use super::standardize_stacks_stackerdb_chunks; @@ -426,7 +429,8 @@ fn parses_block_response_signer_message() { slot_version: 11, }], }; - let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks).unwrap(); + let ctx = &Context::empty(); + let parsed_chunk = standardize_stacks_stackerdb_chunks(&new_chunks, ctx).unwrap(); assert_eq!(parsed_chunk.len(), 1); let message = &parsed_chunk[0]; diff --git a/components/chainhook-sdk/src/observer/mod.rs b/components/chainhook-sdk/src/observer/mod.rs index 08f8248af..1abe47ae7 100644 --- a/components/chainhook-sdk/src/observer/mod.rs +++ b/components/chainhook-sdk/src/observer/mod.rs @@ -959,11 +959,7 @@ pub async fn start_stacks_event_observer( indexer.seed_stacks_block_pool(stacks_startup_context.block_pool_seed, &ctx); let log_level = if config.display_stacks_ingestion_logs { - if cfg!(feature = "cli") { - LogLevel::Critical - } else { - LogLevel::Debug - } + LogLevel::Debug } else { LogLevel::Off }; From 
44bd445f2e42ec71e9724d71ec41538884f11393 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 25 Oct 2024 10:54:09 -0600 Subject: [PATCH 09/25] ci: publish stacks signers as beta release --- .github/workflows/ci.yaml | 2 ++ .releaserc | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0b30ab741..e2ec9feb3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -4,7 +4,9 @@ on: push: branches: - main + - beta - develop + - stacks-signers paths-ignore: - '**/CHANGELOG.md' pull_request: diff --git a/.releaserc b/.releaserc index 9ef87a7c0..66c1ac94c 100644 --- a/.releaserc +++ b/.releaserc @@ -1,6 +1,16 @@ { "branches": [ - "main" + "main", + { + "name": "beta", + "channel": "beta", + "prerelease": true + }, + { + "name": "stacks-signers", + "channel": "stacks-signers", + "prerelease": true + } ], "plugins": [ [ From a682ba0e15e1eba2c9e7bff0a220cb31d3c852e9 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 25 Oct 2024 17:17:56 +0000 Subject: [PATCH 10/25] chore(release): 1.9.0-stacks-signers.1 [skip ci] ## [1.9.0-stacks-signers.1](https://github.com/hirosystems/chainhook/compare/v1.8.0...v1.9.0-stacks-signers.1) (2024-10-25) ### Features * add various new nakamoto block fields to `/new_block` ingestion and `StacksPayload` ([#659](https://github.com/hirosystems/chainhook/issues/659)) ([f48cda5](https://github.com/hirosystems/chainhook/commit/f48cda533dfc58bb630737fb29cf1bc9c966a638)) * include recovered signer pubkeys in new block payload ([#662](https://github.com/hirosystems/chainhook/issues/662)) ([b5ad4ba](https://github.com/hirosystems/chainhook/commit/b5ad4ba11dd72722bb6cbe936ec29411cde9a606)) * include signer messages in Stacks predicate payloads ([#656](https://github.com/hirosystems/chainhook/issues/656)) ([aee14bc](https://github.com/hirosystems/chainhook/commit/aee14bc693573f403f8a6d8eafe7b30d2ca76b54)) * parse `/stackerdb_chunks` 
Stacks node event ([#653](https://github.com/hirosystems/chainhook/issues/653)) ([e44d84a](https://github.com/hirosystems/chainhook/commit/e44d84a0d739921a5a3ccae6e9643bdb85005f71)) * store signer messages in local sqlite database ([#664](https://github.com/hirosystems/chainhook/issues/664)) ([d12acd9](https://github.com/hirosystems/chainhook/commit/d12acd9c99ce0d0721dfcb853e5cb236e45925b7)) * support tenure-height in block messages ([#661](https://github.com/hirosystems/chainhook/issues/661)) ([99e5a61](https://github.com/hirosystems/chainhook/commit/99e5a6119fb8e87e1e14d789fdb866eae230e8e8)) * **ts-client:** add persisted predicates and periodic health check ([#658](https://github.com/hirosystems/chainhook/issues/658)) ([535226a](https://github.com/hirosystems/chainhook/commit/535226a80480585aabfcf89d68b21d2ada1c50c1)) * **ts-client:** add signer messages to stacks payloads ([#657](https://github.com/hirosystems/chainhook/issues/657)) ([ff66bb2](https://github.com/hirosystems/chainhook/commit/ff66bb2eab771c16fead154607a280664eb6be4e)) ### Bug Fixes * add a configurable max timeout for outgoing predicate payload requests ([#642](https://github.com/hirosystems/chainhook/issues/642)) ([6c1dfa9](https://github.com/hirosystems/chainhook/commit/6c1dfa9f741041d0fac32e2a89c04a71b4a485cd)), closes [#643](https://github.com/hirosystems/chainhook/issues/643) * increase stacks event server body size limit to 500MB ([#660](https://github.com/hirosystems/chainhook/issues/660)) ([21234c9](https://github.com/hirosystems/chainhook/commit/21234c978d4c49ea5526c2cf1f74d42a645a68c8)) * return 500 status code to Stacks node when event ingestion fails ([#648](https://github.com/hirosystems/chainhook/issues/648)) ([63c753c](https://github.com/hirosystems/chainhook/commit/63c753c1a1b043e271d1ebca1f90f154030a89e8)), closes [#646](https://github.com/hirosystems/chainhook/issues/646) * signer pubkey calculation ([#665](https://github.com/hirosystems/chainhook/issues/665)) 
([cc93873](https://github.com/hirosystems/chainhook/commit/cc93873dacc88e5df97d1006394b99744e63009b)) * update SignerMessage deserializing ([#663](https://github.com/hirosystems/chainhook/issues/663)) ([71364c1](https://github.com/hirosystems/chainhook/commit/71364c15a88a06d5c71b3f3d3c88c489ff4733d1)) * validate ts client predicates before registering ([#639](https://github.com/hirosystems/chainhook/issues/639)) ([2d08f72](https://github.com/hirosystems/chainhook/commit/2d08f72e44446e9c311f8a64f992e103f17a4ae7)) --- CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b1e09583..d8cd3936c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,27 @@ +## [1.9.0-stacks-signers.1](https://github.com/hirosystems/chainhook/compare/v1.8.0...v1.9.0-stacks-signers.1) (2024-10-25) + + +### Features + +* add various new nakamoto block fields to `/new_block` ingestion and `StacksPayload` ([#659](https://github.com/hirosystems/chainhook/issues/659)) ([f48cda5](https://github.com/hirosystems/chainhook/commit/f48cda533dfc58bb630737fb29cf1bc9c966a638)) +* include recovered signer pubkeys in new block payload ([#662](https://github.com/hirosystems/chainhook/issues/662)) ([b5ad4ba](https://github.com/hirosystems/chainhook/commit/b5ad4ba11dd72722bb6cbe936ec29411cde9a606)) +* include signer messages in Stacks predicate payloads ([#656](https://github.com/hirosystems/chainhook/issues/656)) ([aee14bc](https://github.com/hirosystems/chainhook/commit/aee14bc693573f403f8a6d8eafe7b30d2ca76b54)) +* parse `/stackerdb_chunks` Stacks node event ([#653](https://github.com/hirosystems/chainhook/issues/653)) ([e44d84a](https://github.com/hirosystems/chainhook/commit/e44d84a0d739921a5a3ccae6e9643bdb85005f71)) +* store signer messages in local sqlite database ([#664](https://github.com/hirosystems/chainhook/issues/664)) ([d12acd9](https://github.com/hirosystems/chainhook/commit/d12acd9c99ce0d0721dfcb853e5cb236e45925b7)) +* 
support tenure-height in block messages ([#661](https://github.com/hirosystems/chainhook/issues/661)) ([99e5a61](https://github.com/hirosystems/chainhook/commit/99e5a6119fb8e87e1e14d789fdb866eae230e8e8)) +* **ts-client:** add persisted predicates and periodic health check ([#658](https://github.com/hirosystems/chainhook/issues/658)) ([535226a](https://github.com/hirosystems/chainhook/commit/535226a80480585aabfcf89d68b21d2ada1c50c1)) +* **ts-client:** add signer messages to stacks payloads ([#657](https://github.com/hirosystems/chainhook/issues/657)) ([ff66bb2](https://github.com/hirosystems/chainhook/commit/ff66bb2eab771c16fead154607a280664eb6be4e)) + + +### Bug Fixes + +* add a configurable max timeout for outgoing predicate payload requests ([#642](https://github.com/hirosystems/chainhook/issues/642)) ([6c1dfa9](https://github.com/hirosystems/chainhook/commit/6c1dfa9f741041d0fac32e2a89c04a71b4a485cd)), closes [#643](https://github.com/hirosystems/chainhook/issues/643) +* increase stacks event server body size limit to 500MB ([#660](https://github.com/hirosystems/chainhook/issues/660)) ([21234c9](https://github.com/hirosystems/chainhook/commit/21234c978d4c49ea5526c2cf1f74d42a645a68c8)) +* return 500 status code to Stacks node when event ingestion fails ([#648](https://github.com/hirosystems/chainhook/issues/648)) ([63c753c](https://github.com/hirosystems/chainhook/commit/63c753c1a1b043e271d1ebca1f90f154030a89e8)), closes [#646](https://github.com/hirosystems/chainhook/issues/646) +* signer pubkey calculation ([#665](https://github.com/hirosystems/chainhook/issues/665)) ([cc93873](https://github.com/hirosystems/chainhook/commit/cc93873dacc88e5df97d1006394b99744e63009b)) +* update SignerMessage deserializing ([#663](https://github.com/hirosystems/chainhook/issues/663)) ([71364c1](https://github.com/hirosystems/chainhook/commit/71364c15a88a06d5c71b3f3d3c88c489ff4733d1)) +* validate ts client predicates before registering 
([#639](https://github.com/hirosystems/chainhook/issues/639)) ([2d08f72](https://github.com/hirosystems/chainhook/commit/2d08f72e44446e9c311f8a64f992e103f17a4ae7)) + ## [1.8.0](https://github.com/hirosystems/chainhook/compare/v1.7.0...v1.8.0) (2024-08-12) ### New Features From 34db7144661e89dfc859e2eb3d5e684f96231ad0 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 25 Oct 2024 11:32:40 -0600 Subject: [PATCH 11/25] ci: check out the correct git tag on release --- .github/workflows/ci.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e2ec9feb3..4b2f3e8ad 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -285,6 +285,7 @@ jobs: - distributions outputs: new_release_version: ${{ steps.semantic.outputs.new_release_version }} + new_release_git_tag: ${{ steps.semantic.outputs.new_release_git_tag }} steps: - uses: actions/checkout@v4 with: @@ -340,7 +341,7 @@ jobs: uses: actions/checkout@v4 with: persist-credentials: false - ref: ${{ needs.semantic-release.outputs.new_release_version }} + ref: ${{ needs.semantic-release.outputs.new_release_git_tag }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 From ad5fd54b3d1d8f638fa44a531bca71306fbb8c6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 25 Oct 2024 15:09:20 -0600 Subject: [PATCH 12/25] feat: add mock signer message support (#669) --- .../chainhook-cli/src/storage/signers.rs | 287 +++++++++++++++++- .../chainhook-sdk/src/indexer/stacks/mod.rs | 72 ++++- components/chainhook-types-rs/src/signers.rs | 33 +- .../client/typescript/package-lock.json | 4 +- components/client/typescript/package.json | 2 +- .../typescript/src/schemas/stacks/signers.ts | 62 +++- 6 files changed, 433 insertions(+), 27 deletions(-) diff --git a/components/chainhook-cli/src/storage/signers.rs b/components/chainhook-cli/src/storage/signers.rs index 40a65321c..c01c6abd3 100644 --- 
a/components/chainhook-cli/src/storage/signers.rs +++ b/components/chainhook-cli/src/storage/signers.rs @@ -5,13 +5,13 @@ use chainhook_sdk::{ types::{ BlockAcceptedResponse, BlockIdentifier, BlockProposalData, BlockPushedData, BlockRejectReasonCode, BlockRejectedResponse, BlockResponseData, BlockValidationFailedCode, - NakamotoBlockData, NakamotoBlockHeaderData, SignerMessageMetadata, - StacksNonConsensusEventData, StacksNonConsensusEventPayloadData, StacksSignerMessage, - StacksStackerDbChunk, + MockBlockData, MockProposalData, MockSignatureData, NakamotoBlockData, + NakamotoBlockHeaderData, PeerInfoData, SignerMessageMetadata, StacksNonConsensusEventData, + StacksNonConsensusEventPayloadData, StacksSignerMessage, StacksStackerDbChunk, }, utils::Context, }; -use rusqlite::Connection; +use rusqlite::{Connection, Transaction}; use super::sqlite::{create_or_open_readwrite_db, open_existing_readonly_db}; @@ -49,7 +49,7 @@ pub fn initialize_signers_db(base_dir: &PathBuf, ctx: &Context) -> Result Result, + peer_info: &PeerInfoData, + message_id: Option, +) -> Result { + let mut proposal_stmt = db_tx + .prepare( + "INSERT INTO mock_proposals + (message_id, burn_block_height, stacks_tip_consensus_hash, stacks_tip, stacks_tip_height, pox_consensus, + server_version, network_id, index_block_hash) + VALUES (?,?,?,?,?,?,?,?) + RETURNING id", + ) + .map_err(|e| format!("unable to prepare statement: {e}"))?; + let mock_proposal_id: u64 = proposal_stmt + .query(rusqlite::params![ + &message_id, + &peer_info.burn_block_height, + &peer_info.stacks_tip_consensus_hash, + &peer_info.stacks_tip, + &peer_info.stacks_tip_height, + &peer_info.pox_consensus, + &peer_info.server_version, + &peer_info.network_id, + &peer_info.index_block_hash, + ]) + .map_err(|e| format!("unable to write mock proposal: {e}"))? + .next() + .map_err(|e| format!("unable to retrieve mock proposal id: {e}"))? + .ok_or("mock proposal id is empty")? 
+ .get(0) + .map_err(|e| format!("unable to convert message id: {e}"))?; + Ok(mock_proposal_id) +} + +fn store_mock_signature( + db_tx: &Transaction<'_>, + peer_info: &PeerInfoData, + metadata: &SignerMessageMetadata, + message_id: Option, + mock_block_id: Option, +) -> Result<(), String> { + let mock_proposal_id = store_mock_proposal_peer_info(&db_tx, &peer_info, None)?; + let mut signature_stmt = db_tx + .prepare( + "INSERT INTO mock_signatures + (message_id, mock_proposal_id, mock_block_id, server_version) + VALUES (?,?,?,?)", + ) + .map_err(|e| format!("unable to prepare statement: {e}"))?; + signature_stmt + .execute(rusqlite::params![ + &message_id, + &mock_proposal_id, + &mock_block_id, + &metadata.server_version, + ]) + .map_err(|e| format!("unable to write mock signature: {e}"))?; + Ok(()) +} + pub fn store_signer_db_messages( base_dir: &PathBuf, events: &Vec, @@ -139,6 +251,9 @@ pub fn store_signer_db_messages( StacksSignerMessage::BlockProposal(_) => "block_proposal", StacksSignerMessage::BlockResponse(_) => "block_response", StacksSignerMessage::BlockPushed(_) => "block_pushed", + StacksSignerMessage::MockBlock(_) => "mock_block", + StacksSignerMessage::MockSignature(_) => "mock_signature", + StacksSignerMessage::MockProposal(_) => "mock_proposal", }; let message_id: u64 = message_stmt .query(rusqlite::params![ @@ -314,6 +429,61 @@ pub fn store_signer_db_messages( } }; } + StacksSignerMessage::MockSignature(data) => { + try_info!( + ctx, + "Storing stacks MockSignature by signer {}", + chunk.pubkey + ); + store_mock_signature( + &db_tx, + &data.mock_proposal.peer_info, + &data.metadata, + Some(message_id), + None, + )?; + } + StacksSignerMessage::MockProposal(data) => { + try_info!( + ctx, + "Storing stacks MockProposal by signer {}", + chunk.pubkey + ); + let _ = store_mock_proposal_peer_info(&db_tx, data, Some(message_id)); + } + StacksSignerMessage::MockBlock(data) => { + try_info!(ctx, "Storing stacks MockBlock by signer {}", chunk.pubkey); + let 
mock_proposal_id = store_mock_proposal_peer_info( + &db_tx, + &data.mock_proposal.peer_info, + None, + )?; + let mut block_stmt = db_tx + .prepare( + "INSERT INTO mock_blocks + (message_id, mock_proposal_id) + VALUES (?,?) + RETURNING id", + ) + .map_err(|e| format!("unable to prepare statement: {e}"))?; + let mock_block_id: u64 = block_stmt + .query(rusqlite::params![&message_id, &mock_proposal_id,]) + .map_err(|e| format!("unable to write mock block: {e}"))? + .next() + .map_err(|e| format!("unable to retrieve mock block id: {e}"))? + .ok_or("mock block id is empty")? + .get(0) + .map_err(|e| format!("unable to convert message id: {e}"))?; + for signature in data.mock_signatures.iter() { + store_mock_signature( + &db_tx, + &signature.mock_proposal.peer_info, + &signature.metadata, + None, + Some(mock_block_id), + )?; + } + } } } } @@ -481,6 +651,113 @@ pub fn get_signer_db_messages_received_at_block( }, ) .map_err(|e| format!("unable to query block response: {e}"))?, + "mock_signature" => db_tx + .query_row( + "SELECT p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, p.stacks_tip_height, + p.pox_consensus, p.server_version AS peer_version, p.network_id, s.server_version + FROM mock_signatures AS s + INNER JOIN mock_proposals AS p ON p.id = s.mock_proposal_id + WHERE s.message_id = ?", + rusqlite::params![&message_id], + |signature_row| { + Ok(StacksSignerMessage::MockSignature(MockSignatureData { + mock_proposal: MockProposalData { + peer_info: PeerInfoData { + burn_block_height: signature_row.get(0).unwrap(), + stacks_tip_consensus_hash: signature_row.get(1).unwrap(), + stacks_tip: signature_row.get(2).unwrap(), + stacks_tip_height: signature_row.get(3).unwrap(), + pox_consensus: signature_row.get(4).unwrap(), + server_version: signature_row.get(5).unwrap(), + network_id: signature_row.get(6).unwrap(), + index_block_hash: signature_row.get(7).unwrap(), + } + }, + metadata: SignerMessageMetadata { + server_version: signature_row.get(8).unwrap() + } 
+ })) + }, + ) + .map_err(|e| format!("unable to query mock signature: {e}"))?, + "mock_proposal" => db_tx + .query_row( + "SELECT burn_block_height, stacks_tip_consensus_hash, stacks_tip, stacks_tip_height, + pox_consensus, server_version, network_id, index_block_hash + FROM mock_proposals + WHERE message_id = ?", + rusqlite::params![&message_id], + |proposal_row| { + Ok(StacksSignerMessage::MockProposal(PeerInfoData { + burn_block_height: proposal_row.get(0).unwrap(), + stacks_tip_consensus_hash: proposal_row.get(1).unwrap(), + stacks_tip: proposal_row.get(2).unwrap(), + stacks_tip_height: proposal_row.get(3).unwrap(), + pox_consensus: proposal_row.get(4).unwrap(), + server_version: proposal_row.get(5).unwrap(), + network_id: proposal_row.get(6).unwrap(), + index_block_hash: proposal_row.get(7).unwrap(), + })) + }, + ) + .map_err(|e| format!("unable to query mock proposal: {e}"))?, + "mock_block" => db_tx + .query_row( + "SELECT b.id, p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, p.stacks_tip_height, + p.pox_consensus, p.server_version, p.network_id, p.index_block_hash + FROM mock_blocks AS b + INNER JOIN mock_proposals AS p ON p.id = b.mock_proposal_id + WHERE b.message_id = ?", + rusqlite::params![&message_id], + |block_row| { + let mock_block_id: u64 = block_row.get(0).unwrap(); + let mut sig_stmt = db_tx + .prepare( + "SELECT p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, + p.stacks_tip_height, p.pox_consensus, p.server_version AS peer_version, + p.network_id, p.index_block_hash, s.server_version + FROM mock_signatures AS s + INNER JOIN mock_proposals AS p ON p.id = s.mock_proposal_id + WHERE s.mock_block_id = ?")?; + let mut signatures_iter = sig_stmt.query(rusqlite::params![&mock_block_id])?; + let mut mock_signatures = vec![]; + while let Some(signature_row) = signatures_iter.next()? 
{ + mock_signatures.push(MockSignatureData { + mock_proposal: MockProposalData { + peer_info: PeerInfoData { + burn_block_height: signature_row.get(0).unwrap(), + stacks_tip_consensus_hash: signature_row.get(1).unwrap(), + stacks_tip: signature_row.get(2).unwrap(), + stacks_tip_height: signature_row.get(3).unwrap(), + pox_consensus: signature_row.get(4).unwrap(), + server_version: signature_row.get(5).unwrap(), + network_id: signature_row.get(6).unwrap(), + index_block_hash: signature_row.get(7).unwrap(), + } + }, + metadata: SignerMessageMetadata { + server_version: signature_row.get(8).unwrap() + } + }); + } + Ok(StacksSignerMessage::MockBlock(MockBlockData { + mock_proposal: MockProposalData { + peer_info: PeerInfoData { + burn_block_height: block_row.get(1).unwrap(), + stacks_tip_consensus_hash: block_row.get(2).unwrap(), + stacks_tip: block_row.get(3).unwrap(), + stacks_tip_height: block_row.get(4).unwrap(), + pox_consensus: block_row.get(5).unwrap(), + server_version: block_row.get(6).unwrap(), + network_id: block_row.get(7).unwrap(), + index_block_hash: block_row.get(8).unwrap(), + } + }, + mock_signatures + })) + }, + ) + .map_err(|e| format!("unable to query mock block: {e}"))?, _ => return Err(format!("invalid message type: {type_str}")), }; events.push(event_data_from_message_row( diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 9cc5b8928..3a4a12a86 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -693,15 +693,13 @@ pub fn standardize_stacks_marshalled_stackerdb_chunks( #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_stackerdb_chunks( stackerdb_chunks: &NewStackerDbChunks, - ctx: &Context, + _ctx: &Context, ) -> Result, String> { use stacks_codec::codec::BlockResponse; use stacks_codec::codec::RejectCode; use stacks_codec::codec::SignerMessage; use stacks_codec::codec::ValidateRejectCode; - use 
crate::try_debug; - let contract_id = &stackerdb_chunks.contract_id.name; let mut parsed_chunks: Vec = vec![]; for slot in stackerdb_chunks.modified_slots.iter() { @@ -790,18 +788,28 @@ pub fn standardize_stacks_stackerdb_chunks( block: standardize_stacks_nakamoto_block(&nakamoto_block)?, }) } - SignerMessage::MockSignature(_) => { - try_debug!(ctx, "Ignoring MockSignature stacks signer message"); - continue; - } - SignerMessage::MockProposal(_) => { - try_debug!(ctx, "Ignoring MockProposal stacks signer message"); - continue; - } - SignerMessage::MockBlock(_) => { - try_debug!(ctx, "Ignoring MockBlock stacks signer message"); - continue; - } + SignerMessage::MockSignature(signature) => StacksSignerMessage::MockSignature( + standardize_stacks_signer_mock_signature(&signature)?, + ), + SignerMessage::MockProposal(data) => StacksSignerMessage::MockProposal( + standardize_stacks_signer_peer_info(&data.peer_info)?, + ), + SignerMessage::MockBlock(data) => StacksSignerMessage::MockBlock(MockBlockData { + mock_proposal: MockProposalData { + peer_info: standardize_stacks_signer_peer_info(&data.mock_proposal.peer_info)?, + }, + mock_signatures: data + .mock_signatures + .iter() + .map(|signature| standardize_stacks_signer_mock_signature(signature)) + .try_fold(Vec::new(), |mut acc, item| -> Result, String> { + item.and_then(|val| { + acc.push(val); + Ok(()) + })?; + Ok(acc) + })?, + }), }; parsed_chunks.push(StacksStackerDbChunk { contract: contract_id.clone(), @@ -817,6 +825,40 @@ pub fn standardize_stacks_stackerdb_chunks( Ok(parsed_chunks) } +#[cfg(feature = "stacks-signers")] +pub fn standardize_stacks_signer_mock_signature( + signature: &stacks_codec::codec::MockSignature, +) -> Result { + Ok(MockSignatureData { + mock_proposal: MockProposalData { + peer_info: standardize_stacks_signer_peer_info(&signature.mock_proposal.peer_info)?, + }, + metadata: SignerMessageMetadata { + server_version: signature.metadata.server_version.clone(), + }, + }) +} + +#[cfg(feature = 
"stacks-signers")] +pub fn standardize_stacks_signer_peer_info( + peer_info: &stacks_codec::codec::PeerInfo, +) -> Result { + let block_hash = format!("0x{}", peer_info.stacks_tip.to_hex()); + Ok(PeerInfoData { + burn_block_height: peer_info.burn_block_height, + stacks_tip_consensus_hash: format!("0x{}", peer_info.stacks_tip_consensus_hash.to_hex()), + stacks_tip: block_hash.clone(), + stacks_tip_height: peer_info.stacks_tip_height, + pox_consensus: format!("0x{}", peer_info.pox_consensus.to_hex()), + server_version: peer_info.server_version.clone(), + network_id: peer_info.network_id, + index_block_hash: get_nakamoto_index_block_hash( + &block_hash, + &peer_info.stacks_tip_consensus_hash, + )?, + }) +} + #[cfg(feature = "stacks-signers")] pub fn standardize_stacks_nakamoto_block( block: &stacks_codec::codec::NakamotoBlock, diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs index c0754dd28..3cb78d7f2 100644 --- a/components/chainhook-types-rs/src/signers.rs +++ b/components/chainhook-types-rs/src/signers.rs @@ -91,13 +91,44 @@ pub struct BlockPushedData { pub block: NakamotoBlockData, } +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct PeerInfoData { + pub burn_block_height: u64, + pub stacks_tip_consensus_hash: String, + pub stacks_tip: String, + pub stacks_tip_height: u64, + pub pox_consensus: String, + pub server_version: String, + pub network_id: u32, + pub index_block_hash: String, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct MockProposalData { + pub peer_info: PeerInfoData, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct MockSignatureData { + pub mock_proposal: MockProposalData, + pub metadata: SignerMessageMetadata +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct MockBlockData { + pub mock_proposal: MockProposalData, + pub mock_signatures: Vec +} + #[derive(Debug, Clone, PartialEq, Deserialize, 
Serialize)] #[serde(tag = "type", content = "data")] pub enum StacksSignerMessage { BlockProposal(BlockProposalData), BlockResponse(BlockResponseData), BlockPushed(BlockPushedData), - // TODO(rafaelcr): Add mock messages + MockSignature(MockSignatureData), + MockProposal(PeerInfoData), + MockBlock(MockBlockData), } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] diff --git a/components/client/typescript/package-lock.json b/components/client/typescript/package-lock.json index 55291fdc9..af8a39f3a 100644 --- a/components/client/typescript/package-lock.json +++ b/components/client/typescript/package-lock.json @@ -1,12 +1,12 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.1.1", + "version": "2.2.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@hirosystems/chainhook-client", - "version": "2.1.1", + "version": "2.2.0", "license": "Apache 2.0", "dependencies": { "@fastify/type-provider-typebox": "^3.2.0", diff --git a/components/client/typescript/package.json b/components/client/typescript/package.json index 5cb41df87..f558fb8d4 100644 --- a/components/client/typescript/package.json +++ b/components/client/typescript/package.json @@ -1,6 +1,6 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.1.1", + "version": "2.2.0", "description": "Chainhook TypeScript client", "main": "./dist/index.js", "typings": "./dist/index.d.ts", diff --git a/components/client/typescript/src/schemas/stacks/signers.ts b/components/client/typescript/src/schemas/stacks/signers.ts index 7b9ceeb8f..57503acc6 100644 --- a/components/client/typescript/src/schemas/stacks/signers.ts +++ b/components/client/typescript/src/schemas/stacks/signers.ts @@ -50,6 +50,11 @@ export type StacksSignerMessageBlockResponseAccepted = Static< typeof StacksSignerMessageBlockResponseAcceptedSchema >; +export const StacksSignerMessageMetadataSchema = Type.Object({ + server_version: Type.String(), +}); +export type StacksSignerMessageMetadata = Static; + export 
const StacksSignerMessageBlockResponseRejectedSchema = Type.Object({ type: Type.Literal('Rejected'), data: Type.Object({ @@ -75,9 +80,7 @@ export const StacksSignerMessageBlockResponseRejectedSchema = Type.Object({ signer_signature_hash: Type.String(), chain_id: Type.Integer(), signature: Type.String(), - metadata: Type.Object({ - server_version: Type.String(), - }), + metadata: StacksSignerMessageMetadataSchema, }), }); export type StacksSignerMessageBlockResponseRejected = Static< @@ -103,10 +106,63 @@ export const StacksSignerMessageBlockPushedSchema = Type.Object({ }); export type StacksSignerMessageBlockPushed = Static; +export const StacksSignerMessagePeerInfoSchema = Type.Object({ + burn_block_height: Type.Integer(), + stacks_tip_consensus_hash: Type.String(), + stacks_tip: Type.String(), + stacks_tip_height: Type.Integer(), + pox_consensus: Type.String(), + server_version: Type.String(), + network_id: Type.Integer(), + index_block_hash: Type.String(), +}); +export type StacksSignerMessagePeerInfo = Static; + +export const StacksSignerMessageMockProposalDataSchema = Type.Object({ + peer_info: StacksSignerMessagePeerInfoSchema, +}); +export type StacksSignerMessageMockProposalData = Static< + typeof StacksSignerMessageMockProposalDataSchema +>; + +export const StacksSignerMessageMockSignatureDataSchema = Type.Object({ + mock_proposal: StacksSignerMessageMockProposalDataSchema, + metadata: StacksSignerMessageMetadataSchema, +}); +export type StacksSignerMessageMockSignatureData = Static< + typeof StacksSignerMessageMockSignatureDataSchema +>; + +export const StacksSignerMessageMockSignatureSchema = Type.Object({ + type: Type.Literal('MockSignature'), + data: StacksSignerMessageMockSignatureDataSchema, +}); +export type StacksSignerMessageMockSignature = Static< + typeof StacksSignerMessageMockSignatureSchema +>; + +export const StacksSignerMessageMockProposalSchema = Type.Object({ + type: Type.Literal('MockProposal'), + data: StacksSignerMessagePeerInfoSchema, 
+}); +export type StacksSignerMessageMockProposal = Static; + +export const StacksSignerMessageMockBlockSchema = Type.Object({ + type: Type.Literal('MockBlock'), + data: Type.Object({ + mock_proposal: StacksSignerMessageMockProposalDataSchema, + mock_signatures: Type.Array(StacksSignerMessageMockSignatureDataSchema), + }), +}); +export type StacksSignerMessageMockBlock = Static; + export const StacksSignerMessageSchema = Type.Union([ StacksSignerMessageBlockProposalSchema, StacksSignerMessageBlockResponseSchema, StacksSignerMessageBlockPushedSchema, + StacksSignerMessageMockSignatureSchema, + StacksSignerMessageMockProposalSchema, + StacksSignerMessageMockBlockSchema, ]); export type StacksSignerMessage = Static; From b3a156f423187d3b53c636fa5acc392b58dbbc70 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 25 Oct 2024 21:15:37 +0000 Subject: [PATCH 13/25] chore(release): 1.9.0-stacks-signers.2 [skip ci] ## [1.9.0-stacks-signers.2](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.1...v1.9.0-stacks-signers.2) (2024-10-25) ### Features * add mock signer message support ([#669](https://github.com/hirosystems/chainhook/issues/669)) ([ad5fd54](https://github.com/hirosystems/chainhook/commit/ad5fd54b3d1d8f638fa44a531bca71306fbb8c6f)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d8cd3936c..3e4acea61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.2](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.1...v1.9.0-stacks-signers.2) (2024-10-25) + + +### Features + +* add mock signer message support ([#669](https://github.com/hirosystems/chainhook/issues/669)) ([ad5fd54](https://github.com/hirosystems/chainhook/commit/ad5fd54b3d1d8f638fa44a531bca71306fbb8c6f)) + ## [1.9.0-stacks-signers.1](https://github.com/hirosystems/chainhook/compare/v1.8.0...v1.9.0-stacks-signers.1) (2024-10-25) From 
c414d793e9a25c1b541914fee256d98d53bc08a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 25 Oct 2024 15:46:23 -0600 Subject: [PATCH 14/25] feat: add mock signature pubkey (#670) --- Cargo.lock | 2 +- Cargo.toml | 2 +- .../chainhook-cli/src/storage/signers.rs | 42 ++++++++++--------- .../chainhook-sdk/src/indexer/stacks/mod.rs | 27 ++++++++---- components/chainhook-types-rs/src/signers.rs | 4 +- .../client/typescript/package-lock.json | 4 +- components/client/typescript/package.json | 2 +- .../typescript/src/schemas/stacks/signers.ts | 2 + 8 files changed, 52 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae93e54c9..95f17e410 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3552,7 +3552,7 @@ dependencies = [ [[package]] name = "stacks-codec" version = "2.10.0" -source = "git+https://github.com/hirosystems/clarinet.git?rev=fcebfb5a986ded32d5a450c34f8e5e5f2da97de4#fcebfb5a986ded32d5a450c34f8e5e5f2da97de4" +source = "git+https://github.com/hirosystems/clarinet.git?rev=3ec638a64171b7557613531f118f302992c99b89#3ec638a64171b7557613531f118f302992c99b89" dependencies = [ "clarity", "serde", diff --git a/Cargo.toml b/Cargo.toml index 97b5fc91f..f1e1171f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ default-members = ["components/chainhook-cli", "components/chainhook-sdk"] resolver = "2" [patch.crates-io] -stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "fcebfb5a986ded32d5a450c34f8e5e5f2da97de4" } +stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "3ec638a64171b7557613531f118f302992c99b89" } diff --git a/components/chainhook-cli/src/storage/signers.rs b/components/chainhook-cli/src/storage/signers.rs index c01c6abd3..a1f6a77bd 100644 --- a/components/chainhook-cli/src/storage/signers.rs +++ b/components/chainhook-cli/src/storage/signers.rs @@ -146,6 +146,8 @@ pub fn initialize_signers_db(base_dir: &PathBuf, ctx: &Context) -> Result, - peer_info: &PeerInfoData, 
- metadata: &SignerMessageMetadata, + mock_signature: &MockSignatureData, message_id: Option, mock_block_id: Option, ) -> Result<(), String> { - let mock_proposal_id = store_mock_proposal_peer_info(&db_tx, &peer_info, None)?; + let mock_proposal_id = + store_mock_proposal_peer_info(&db_tx, &mock_signature.mock_proposal.peer_info, None)?; let mut signature_stmt = db_tx .prepare( "INSERT INTO mock_signatures - (message_id, mock_proposal_id, mock_block_id, server_version) - VALUES (?,?,?,?)", + (message_id, mock_proposal_id, mock_block_id, server_version, signature, pubkey) + VALUES (?,?,?,?,?,?)", ) .map_err(|e| format!("unable to prepare statement: {e}"))?; signature_stmt @@ -213,7 +215,9 @@ fn store_mock_signature( &message_id, &mock_proposal_id, &mock_block_id, - &metadata.server_version, + &mock_signature.metadata.server_version, + &mock_signature.signature, + &mock_signature.pubkey, ]) .map_err(|e| format!("unable to write mock signature: {e}"))?; Ok(()) @@ -435,13 +439,7 @@ pub fn store_signer_db_messages( "Storing stacks MockSignature by signer {}", chunk.pubkey ); - store_mock_signature( - &db_tx, - &data.mock_proposal.peer_info, - &data.metadata, - Some(message_id), - None, - )?; + store_mock_signature(&db_tx, &data, Some(message_id), None)?; } StacksSignerMessage::MockProposal(data) => { try_info!( @@ -477,8 +475,7 @@ pub fn store_signer_db_messages( for signature in data.mock_signatures.iter() { store_mock_signature( &db_tx, - &signature.mock_proposal.peer_info, - &signature.metadata, + &signature, None, Some(mock_block_id), )?; @@ -654,7 +651,8 @@ pub fn get_signer_db_messages_received_at_block( "mock_signature" => db_tx .query_row( "SELECT p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, p.stacks_tip_height, - p.pox_consensus, p.server_version AS peer_version, p.network_id, s.server_version + p.pox_consensus, p.server_version AS peer_version, p.network_id, s.server_version, s.signature, + s.pubkey FROM mock_signatures AS s INNER JOIN 
mock_proposals AS p ON p.id = s.mock_proposal_id WHERE s.message_id = ?", @@ -675,7 +673,9 @@ pub fn get_signer_db_messages_received_at_block( }, metadata: SignerMessageMetadata { server_version: signature_row.get(8).unwrap() - } + }, + signature: signature_row.get(9).unwrap(), + pubkey: signature_row.get(10).unwrap() })) }, ) @@ -715,7 +715,7 @@ pub fn get_signer_db_messages_received_at_block( .prepare( "SELECT p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, p.stacks_tip_height, p.pox_consensus, p.server_version AS peer_version, - p.network_id, p.index_block_hash, s.server_version + p.network_id, p.index_block_hash, s.server_version, s.signature, s.pubkey FROM mock_signatures AS s INNER JOIN mock_proposals AS p ON p.id = s.mock_proposal_id WHERE s.mock_block_id = ?")?; @@ -737,7 +737,9 @@ pub fn get_signer_db_messages_received_at_block( }, metadata: SignerMessageMetadata { server_version: signature_row.get(8).unwrap() - } + }, + signature: signature_row.get(9).unwrap(), + pubkey: signature_row.get(10).unwrap() }); } Ok(StacksSignerMessage::MockBlock(MockBlockData { diff --git a/components/chainhook-sdk/src/indexer/stacks/mod.rs b/components/chainhook-sdk/src/indexer/stacks/mod.rs index 3a4a12a86..6532d48f5 100644 --- a/components/chainhook-sdk/src/indexer/stacks/mod.rs +++ b/components/chainhook-sdk/src/indexer/stacks/mod.rs @@ -802,13 +802,16 @@ pub fn standardize_stacks_stackerdb_chunks( .mock_signatures .iter() .map(|signature| standardize_stacks_signer_mock_signature(signature)) - .try_fold(Vec::new(), |mut acc, item| -> Result, String> { - item.and_then(|val| { - acc.push(val); - Ok(()) - })?; - Ok(acc) - })?, + .try_fold( + Vec::new(), + |mut acc, item| -> Result, String> { + item.and_then(|val| { + acc.push(val); + Ok(()) + })?; + Ok(acc) + }, + )?, }), }; parsed_chunks.push(StacksStackerDbChunk { @@ -829,6 +832,14 @@ pub fn standardize_stacks_stackerdb_chunks( pub fn standardize_stacks_signer_mock_signature( signature: 
&stacks_codec::codec::MockSignature, ) -> Result { + let pubkey = get_signer_pubkey_from_message_hash( + &signature + .mock_proposal + .signer_signature_hash() + .as_bytes() + .to_vec(), + &signature.signature, + )?; Ok(MockSignatureData { mock_proposal: MockProposalData { peer_info: standardize_stacks_signer_peer_info(&signature.mock_proposal.peer_info)?, @@ -836,6 +847,8 @@ pub fn standardize_stacks_signer_mock_signature( metadata: SignerMessageMetadata { server_version: signature.metadata.server_version.clone(), }, + signature: format!("0x{}", signature.signature.to_hex()), + pubkey: format!("0x{}", hex::encode(pubkey)), }) } diff --git a/components/chainhook-types-rs/src/signers.rs b/components/chainhook-types-rs/src/signers.rs index 3cb78d7f2..a72647031 100644 --- a/components/chainhook-types-rs/src/signers.rs +++ b/components/chainhook-types-rs/src/signers.rs @@ -111,7 +111,9 @@ pub struct MockProposalData { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct MockSignatureData { pub mock_proposal: MockProposalData, - pub metadata: SignerMessageMetadata + pub metadata: SignerMessageMetadata, + pub signature: String, + pub pubkey: String, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] diff --git a/components/client/typescript/package-lock.json b/components/client/typescript/package-lock.json index af8a39f3a..c753b9a6f 100644 --- a/components/client/typescript/package-lock.json +++ b/components/client/typescript/package-lock.json @@ -1,12 +1,12 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.2.0", + "version": "2.3.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@hirosystems/chainhook-client", - "version": "2.2.0", + "version": "2.3.0", "license": "Apache 2.0", "dependencies": { "@fastify/type-provider-typebox": "^3.2.0", diff --git a/components/client/typescript/package.json b/components/client/typescript/package.json index f558fb8d4..78117ec36 100644 --- 
a/components/client/typescript/package.json +++ b/components/client/typescript/package.json @@ -1,6 +1,6 @@ { "name": "@hirosystems/chainhook-client", - "version": "2.2.0", + "version": "2.3.0", "description": "Chainhook TypeScript client", "main": "./dist/index.js", "typings": "./dist/index.d.ts", diff --git a/components/client/typescript/src/schemas/stacks/signers.ts b/components/client/typescript/src/schemas/stacks/signers.ts index 57503acc6..07f358a2b 100644 --- a/components/client/typescript/src/schemas/stacks/signers.ts +++ b/components/client/typescript/src/schemas/stacks/signers.ts @@ -128,6 +128,8 @@ export type StacksSignerMessageMockProposalData = Static< export const StacksSignerMessageMockSignatureDataSchema = Type.Object({ mock_proposal: StacksSignerMessageMockProposalDataSchema, metadata: StacksSignerMessageMetadataSchema, + signature: Type.String(), + pubkey: Type.String(), }); export type StacksSignerMessageMockSignatureData = Static< typeof StacksSignerMessageMockSignatureDataSchema From dab83e127a017d23b749eb377f2c68f4502ea6f7 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 25 Oct 2024 22:07:45 +0000 Subject: [PATCH 15/25] chore(release): 1.9.0-stacks-signers.3 [skip ci] ## [1.9.0-stacks-signers.3](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.2...v1.9.0-stacks-signers.3) (2024-10-25) ### Features * add mock signature pubkey ([#670](https://github.com/hirosystems/chainhook/issues/670)) ([c414d79](https://github.com/hirosystems/chainhook/commit/c414d793e9a25c1b541914fee256d98d53bc08a3)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e4acea61..4f3ee46a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.3](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.2...v1.9.0-stacks-signers.3) (2024-10-25) + + +### Features + +* add mock signature pubkey 
([#670](https://github.com/hirosystems/chainhook/issues/670)) ([c414d79](https://github.com/hirosystems/chainhook/commit/c414d793e9a25c1b541914fee256d98d53bc08a3)) + ## [1.9.0-stacks-signers.2](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.1...v1.9.0-stacks-signers.2) (2024-10-25) From 3c117b562d02b8b5379fd4b0f3187567d7f5289b Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 25 Oct 2024 16:24:51 -0600 Subject: [PATCH 16/25] fix: bump stacks codec --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95f17e410..bb65c561a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3552,7 +3552,7 @@ dependencies = [ [[package]] name = "stacks-codec" version = "2.10.0" -source = "git+https://github.com/hirosystems/clarinet.git?rev=3ec638a64171b7557613531f118f302992c99b89#3ec638a64171b7557613531f118f302992c99b89" +source = "git+https://github.com/hirosystems/clarinet.git?rev=2ea8e2a104c2e9f3c6f90297bfd79daf596bf34d#2ea8e2a104c2e9f3c6f90297bfd79daf596bf34d" dependencies = [ "clarity", "serde", diff --git a/Cargo.toml b/Cargo.toml index f1e1171f6..530080e69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ default-members = ["components/chainhook-cli", "components/chainhook-sdk"] resolver = "2" [patch.crates-io] -stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "3ec638a64171b7557613531f118f302992c99b89" } +stacks-codec = { git = "https://github.com/hirosystems/clarinet.git", rev = "2ea8e2a104c2e9f3c6f90297bfd79daf596bf34d" } From 186d3376a2a015cf8e65454578813c3777fd6e7e Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 25 Oct 2024 22:46:30 +0000 Subject: [PATCH 17/25] chore(release): 1.9.0-stacks-signers.4 [skip ci] ## [1.9.0-stacks-signers.4](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.3...v1.9.0-stacks-signers.4) (2024-10-25) ### Bug Fixes * bump stacks codec 
([3c117b5](https://github.com/hirosystems/chainhook/commit/3c117b562d02b8b5379fd4b0f3187567d7f5289b)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f3ee46a0..b6b5edc38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.4](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.3...v1.9.0-stacks-signers.4) (2024-10-25) + + +### Bug Fixes + +* bump stacks codec ([3c117b5](https://github.com/hirosystems/chainhook/commit/3c117b562d02b8b5379fd4b0f3187567d7f5289b)) + ## [1.9.0-stacks-signers.3](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.2...v1.9.0-stacks-signers.3) (2024-10-25) From 8bb59c1b4727a37df47e08b7a31456bcb6528289 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 25 Oct 2024 16:59:48 -0600 Subject: [PATCH 18/25] fix: mock signature retrieval (#671) --- components/chainhook-cli/src/storage/signers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/components/chainhook-cli/src/storage/signers.rs b/components/chainhook-cli/src/storage/signers.rs index a1f6a77bd..954ac6c34 100644 --- a/components/chainhook-cli/src/storage/signers.rs +++ b/components/chainhook-cli/src/storage/signers.rs @@ -651,8 +651,8 @@ pub fn get_signer_db_messages_received_at_block( "mock_signature" => db_tx .query_row( "SELECT p.burn_block_height, p.stacks_tip_consensus_hash, p.stacks_tip, p.stacks_tip_height, - p.pox_consensus, p.server_version AS peer_version, p.network_id, s.server_version, s.signature, - s.pubkey + p.pox_consensus, p.server_version AS peer_version, p.network_id, p.index_block_hash, s.server_version, + s.signature, s.pubkey FROM mock_signatures AS s INNER JOIN mock_proposals AS p ON p.id = s.mock_proposal_id WHERE s.message_id = ?", From c90091bd98cf53876b97d0d5a5624602e608e3f0 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 25 Oct 2024 23:09:15 +0000 Subject: [PATCH 
19/25] chore(release): 1.9.0-stacks-signers.5 [skip ci] ## [1.9.0-stacks-signers.5](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.4...v1.9.0-stacks-signers.5) (2024-10-25) ### Bug Fixes * mock signature retrieval ([#671](https://github.com/hirosystems/chainhook/issues/671)) ([8bb59c1](https://github.com/hirosystems/chainhook/commit/8bb59c1b4727a37df47e08b7a31456bcb6528289)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b6b5edc38..5c71cb850 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.5](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.4...v1.9.0-stacks-signers.5) (2024-10-25) + + +### Bug Fixes + +* mock signature retrieval ([#671](https://github.com/hirosystems/chainhook/issues/671)) ([8bb59c1](https://github.com/hirosystems/chainhook/commit/8bb59c1b4727a37df47e08b7a31456bcb6528289)) + ## [1.9.0-stacks-signers.4](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.3...v1.9.0-stacks-signers.4) (2024-10-25) From a01470e34876dcf67969063e1fc900ae947a0a54 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 31 Oct 2024 15:24:24 +0100 Subject: [PATCH 20/25] fix: read stacks-core http event POST payloads for ignored events (#673) Workaround for bug in stacks-core event emitter http code. Regression in stacks-core where http requests must now have their POST body read before closing the connection. 
``` chainhook-1 | {"msg":"POST /drop_mempool_tx","level":"DEBUG","ts":"2024-10-30T10:41:34.030361652Z"} stacks-node-1 | WARN [1730284894.031170] [testnet/stacks-node/src/event_dispatcher.rs:496] [relayer-http://0.0.0.0:20443/] Event dispatcher: connection or request failed to chainhook:20455 - Custom { kind: Other, error: "Failed to send 8192 bytes: \"Failed to send socket data\"" }, backoff: 158.191s, attempts: 10 ``` Chainhook ignored some events by returning a 200 http response and closing the connection after reading the http request headers, and ignoring the request body. --- .../src/service/tests/observer_tests.rs | 4 ++-- components/chainhook-sdk/src/observer/http.rs | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/components/chainhook-cli/src/service/tests/observer_tests.rs b/components/chainhook-cli/src/service/tests/observer_tests.rs index 449ee1378..e06df1d87 100644 --- a/components/chainhook-cli/src/service/tests/observer_tests.rs +++ b/components/chainhook-cli/src/service/tests/observer_tests.rs @@ -170,8 +170,8 @@ async fn start_and_ping_event_observer(config: EventObserverConfig, ingestion_po .unwrap(); await_observer_started(ingestion_port).await; } -#[test_case("/drop_mempool_tx", Method::POST, None)] -#[test_case("/attachments/new", Method::POST, None)] +#[test_case("/drop_mempool_tx", Method::POST, Some(&json!({})))] +#[test_case("/attachments/new", Method::POST, Some(&json!({})))] #[test_case("/mined_block", Method::POST, Some(&json!({})))] #[test_case("/mined_microblock", Method::POST, Some(&json!({})))] #[tokio::test] diff --git a/components/chainhook-sdk/src/observer/http.rs b/components/chainhook-sdk/src/observer/http.rs index 5a5938f51..8f0cbc76a 100644 --- a/components/chainhook-sdk/src/observer/http.rs +++ b/components/chainhook-sdk/src/observer/http.rs @@ -301,9 +301,9 @@ pub fn handle_new_mempool_tx( success_response() } -#[post("/drop_mempool_tx", format = "application/json")] -pub fn 
handle_drop_mempool_tx(ctx: &State) -> Json { - ctx.try_log(|logger| slog::debug!(logger, "POST /drop_mempool_tx")); +#[post("/drop_mempool_tx", format = "application/json", data = "")] +pub fn handle_drop_mempool_tx(payload: Json, ctx: &State) -> Json { + ctx.try_log(|logger| slog::debug!(logger, "POST /drop_mempool_tx {:?}", payload)); // TODO(lgalabru): use propagate mempool events Json(json!({ "status": 200, @@ -311,9 +311,9 @@ pub fn handle_drop_mempool_tx(ctx: &State) -> Json { })) } -#[post("/attachments/new", format = "application/json")] -pub fn handle_new_attachement(ctx: &State) -> Json { - ctx.try_log(|logger| slog::debug!(logger, "POST /attachments/new")); +#[post("/attachments/new", format = "application/json", data = "")] +pub fn handle_new_attachement(payload: Json, ctx: &State) -> Json { + ctx.try_log(|logger| slog::debug!(logger, "POST /attachments/new {:?}", payload)); Json(json!({ "status": 200, "result": "Ok", From 367362afbb46c4ad37bac3c3ff8e44f4ac25caac Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Thu, 31 Oct 2024 14:41:14 +0000 Subject: [PATCH 21/25] chore(release): 1.9.0-stacks-signers.6 [skip ci] ## [1.9.0-stacks-signers.6](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.5...v1.9.0-stacks-signers.6) (2024-10-31) ### Bug Fixes * read stacks-core http event POST payloads for ignored events ([#673](https://github.com/hirosystems/chainhook/issues/673)) ([a01470e](https://github.com/hirosystems/chainhook/commit/a01470e34876dcf67969063e1fc900ae947a0a54)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c71cb850..7555c67be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.6](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.5...v1.9.0-stacks-signers.6) (2024-10-31) + + +### Bug Fixes + +* read stacks-core http event POST payloads for ignored events 
([#673](https://github.com/hirosystems/chainhook/issues/673)) ([a01470e](https://github.com/hirosystems/chainhook/commit/a01470e34876dcf67969063e1fc900ae947a0a54)) + ## [1.9.0-stacks-signers.5](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.4...v1.9.0-stacks-signers.5) (2024-10-25) From 1bc949a6ce17ab67c2c1af2d21fe330461689f49 Mon Sep 17 00:00:00 2001 From: Chris Guimaraes Date: Wed, 6 Nov 2024 16:43:16 +0000 Subject: [PATCH 22/25] fix: propagate error when trying to store peer info --- components/chainhook-cli/src/storage/signers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/components/chainhook-cli/src/storage/signers.rs b/components/chainhook-cli/src/storage/signers.rs index 954ac6c34..40e2f7b82 100644 --- a/components/chainhook-cli/src/storage/signers.rs +++ b/components/chainhook-cli/src/storage/signers.rs @@ -49,7 +49,7 @@ pub fn initialize_signers_db(base_dir: &PathBuf, ctx: &Context) -> Result { try_info!(ctx, "Storing stacks MockBlock by signer {}", chunk.pubkey); From defd86f13af6fe2654e841a16c4fb2d33804dc60 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Thu, 7 Nov 2024 15:22:13 +0000 Subject: [PATCH 23/25] chore(release): 1.9.0-stacks-signers.7 [skip ci] ## [1.9.0-stacks-signers.7](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.6...v1.9.0-stacks-signers.7) (2024-11-07) ### Bug Fixes * propagate error when trying to store peer info ([1bc949a](https://github.com/hirosystems/chainhook/commit/1bc949a6ce17ab67c2c1af2d21fe330461689f49)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7555c67be..9fd264e1f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.7](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.6...v1.9.0-stacks-signers.7) (2024-11-07) + + +### Bug Fixes + +* propagate error when trying to store peer info 
([1bc949a](https://github.com/hirosystems/chainhook/commit/1bc949a6ce17ab67c2c1af2d21fe330461689f49)) + ## [1.9.0-stacks-signers.6](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.5...v1.9.0-stacks-signers.6) (2024-10-31) From 1f9f37ee8c9afa864fc618b761195b0d0711dfbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Tue, 12 Nov 2024 09:09:05 -0600 Subject: [PATCH 24/25] fix: only import stacks tsv if chainstate is empty (#684) Also, remove the old code that attempted to re-import a TSV every 32 Stacks blocks Fixes #677 --- components/chainhook-cli/src/archive/mod.rs | 2 +- components/chainhook-cli/src/cli/mod.rs | 13 +- components/chainhook-cli/src/config/mod.rs | 13 +- components/chainhook-cli/src/scan/stacks.rs | 172 +++++++++--------- components/chainhook-cli/src/service/mod.rs | 27 --- .../src/service/tests/helpers/mock_service.rs | 4 +- .../src/service/tests/runloop_tests.rs | 4 +- 7 files changed, 108 insertions(+), 127 deletions(-) diff --git a/components/chainhook-cli/src/archive/mod.rs b/components/chainhook-cli/src/archive/mod.rs index 084ddc3c0..6d8c971ef 100644 --- a/components/chainhook-cli/src/archive/mod.rs +++ b/components/chainhook-cli/src/archive/mod.rs @@ -132,7 +132,7 @@ pub async fn download_stacks_dataset_if_required( ) -> Result { if config.is_initial_ingestion_required() { // Download default tsv. 
- if config.rely_on_remote_stacks_tsv() && config.should_download_remote_stacks_tsv() { + if config.contains_remote_stacks_tsv_url() && config.should_download_remote_stacks_tsv() { let url = config.expected_remote_stacks_tsv_url()?; let mut tsv_file_path = config.expected_cache_path(); tsv_file_path.push(default_tsv_file_path(&config.network.stacks_network)); diff --git a/components/chainhook-cli/src/cli/mod.rs b/components/chainhook-cli/src/cli/mod.rs index 219950f36..b962fcdd1 100644 --- a/components/chainhook-cli/src/cli/mod.rs +++ b/components/chainhook-cli/src/cli/mod.rs @@ -2,7 +2,7 @@ use crate::config::generator::generate_config; use crate::config::Config; use crate::scan::bitcoin::scan_bitcoin_chainstate_via_rpc_using_predicate; use crate::scan::stacks::{ - consolidate_local_stacks_chainstate_using_csv, scan_stacks_chainstate_via_csv_using_predicate, + import_stacks_chainstate_from_remote_tsv, scan_stacks_chainstate_via_csv_using_predicate, scan_stacks_chainstate_via_rocksdb_using_predicate, }; use crate::service::http_api::document_predicate_api_server; @@ -24,6 +24,7 @@ use chainhook_sdk::chainhooks::stacks::StacksChainhookSpecificationNetworkMap; use chainhook_sdk::chainhooks::stacks::StacksPredicate; use chainhook_sdk::chainhooks::stacks::StacksPrintEventBasedPredicate; use chainhook_sdk::chainhooks::types::{ChainhookSpecificationNetworkMap, FileHook, HookAction}; +use chainhook_sdk::try_info; use chainhook_sdk::types::{BitcoinNetwork, BlockIdentifier, StacksNetwork}; use chainhook_sdk::utils::{BlockHeights, Context}; use clap::{Parser, Subcommand}; @@ -342,19 +343,17 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> { ServiceCommand::Start(cmd) => { let mut config = Config::default(cmd.devnet, cmd.testnet, cmd.mainnet, &cmd.config_path)?; - if cmd.prometheus_monitoring_port.is_some() { config.monitoring.prometheus_monitoring_port = cmd.prometheus_monitoring_port; } - let predicates = cmd .predicates_paths .iter() .map(|p| 
load_predicate_from_path(p)) .collect::<Result<Vec<_>, _>>()?; - info!(ctx.expect_logger(), "Starting service...",); - + try_info!(ctx, "Starting chainhook service"); + import_stacks_chainstate_from_remote_tsv(&mut config, &ctx).await?; let mut service = Service::new(config, ctx); return service.run(predicates, None).await; } @@ -541,7 +540,7 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> { }; match open_readonly_stacks_db_conn(&config.expected_cache_path(), &ctx) { Ok(_) => { - let _ = consolidate_local_stacks_chainstate_using_csv( + let _ = import_stacks_chainstate_from_remote_tsv( &mut config, &ctx, ) @@ -812,7 +811,7 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> { } StacksCommand::Db(StacksDbCommand::Update(cmd)) => { let mut config = Config::default(false, false, false, &cmd.config_path)?; - consolidate_local_stacks_chainstate_using_csv(&mut config, &ctx).await?; + import_stacks_chainstate_from_remote_tsv(&mut config, &ctx).await?; } StacksCommand::Db(StacksDbCommand::Check(cmd)) => { let config = Config::default(false, false, false, &cmd.config_path)?; diff --git a/components/chainhook-cli/src/config/mod.rs b/components/chainhook-cli/src/config/mod.rs index cba31806e..3d498903c 100644 --- a/components/chainhook-cli/src/config/mod.rs +++ b/components/chainhook-cli/src/config/mod.rs @@ -304,6 +304,17 @@ impl Config { destination_path } + pub fn is_cache_path_empty(&self) -> Result<bool, String> { + let mut dir = match std::fs::read_dir(self.expected_cache_path()) { + Ok(dir) => dir, + Err(error) => match error.kind() { + std::io::ErrorKind::NotFound => return Ok(true), + _ => return Err(format!("unable to read cache directory: {error}")) + }, + }; + Ok(dir.next().is_none()) + } + fn expected_remote_stacks_tsv_base_url(&self) -> Result<&String, String> { for source in self.event_sources.iter() { if let EventSourceConfig::StacksTsvUrl(config) = source { @@ -323,7 +334,7 @@ impl Config { .map(|url| format!("{}.gz", url)) } - pub fn
rely_on_remote_stacks_tsv(&self) -> bool { + pub fn contains_remote_stacks_tsv_url(&self) -> bool { for source in self.event_sources.iter() { if let EventSourceConfig::StacksTsvUrl(_config) = source { return true; diff --git a/components/chainhook-cli/src/scan/stacks.rs b/components/chainhook-cli/src/scan/stacks.rs index 77d74dad5..5c62fdefb 100644 --- a/components/chainhook-cli/src/scan/stacks.rs +++ b/components/chainhook-cli/src/scan/stacks.rs @@ -23,6 +23,7 @@ use crate::{ use chainhook_sdk::{ chainhooks::stacks::evaluate_stacks_chainhook_on_blocks, indexer::{self, stacks::standardize_stacks_serialized_block_header, Indexer}, + try_info, utils::Context, }; use chainhook_sdk::{ @@ -338,11 +339,8 @@ pub async fn scan_stacks_chainstate_via_rocksdb_using_predicate( &mut db_conns.signers_db, &block_data.block_identifier, )?; - let (hits_per_events, _) = evaluate_stacks_predicate_on_non_consensus_events( - &events, - predicate_spec, - ctx, - ); + let (hits_per_events, _) = + evaluate_stacks_predicate_on_non_consensus_events(&events, predicate_spec, ctx); if hits_per_blocks.is_empty() && hits_per_events.is_empty() { continue; @@ -584,101 +582,101 @@ pub async fn scan_stacks_chainstate_via_csv_using_predicate( Ok(last_block_scanned) } -pub async fn consolidate_local_stacks_chainstate_using_csv( +/// Downloads a remote archive TSV that contains Stacks node events and imports it into chainhook in order to fill up the Stacks +/// blocks database. This import will only happen if chainhook is starting from a fresh install with an empty index. +pub async fn import_stacks_chainstate_from_remote_tsv( config: &mut Config, ctx: &Context, ) -> Result<(), String> { + #[cfg(not(test))] + { + if !config.is_cache_path_empty()? 
{ + try_info!(ctx, "A Stacks chainstate already exists, skipping TSV chainstate import"); + return Ok(()); + } + if !config.contains_remote_stacks_tsv_url() { + try_info!(ctx, "No remote Stacks TSV location was specified in config file, skipping TSV chainstate import"); + return Ok(()); + } + } + try_info!(ctx, "Importing Stacks chainstate from TSV"); + + download_stacks_dataset_if_required(config, ctx).await?; + let stacks_db = open_readonly_stacks_db_conn_with_retry(&config.expected_cache_path(), 3, ctx)?; + let confirmed_tip = get_last_block_height_inserted(&stacks_db, ctx); + let mut canonical_fork: VecDeque<(BlockIdentifier, BlockIdentifier, u64)> = + get_canonical_fork_from_tsv(config, confirmed_tip, ctx).await?; + + let mut indexer = Indexer::new(config.network.clone()); + let mut blocks_inserted = 0; + let mut blocks_read = 0; + let blocks_to_insert = canonical_fork.len(); + let stacks_db_rw = open_readwrite_stacks_db_conn(&config.expected_cache_path(), ctx)?; info!( ctx.expect_logger(), - "Building local chainstate from Stacks archive file" + "Beginning import of {} Stacks blocks into rocks db", blocks_to_insert ); + // TODO: To avoid repeating code with `scan_stacks_chainstate_via_csv_using_predicate`, we should move this block + // retrieval code into a reusable function. + let tsv_path = config.expected_local_stacks_tsv_file()?.clone(); + let mut tsv_reader = BufReader::new(File::open(tsv_path).map_err(|e| e.to_string())?); + let mut tsv_current_line = 0; + for (block_identifier, _parent_block_identifier, tsv_line_number) in canonical_fork.drain(..)
{ + blocks_read += 1; - let downloaded_new_dataset = download_stacks_dataset_if_required(config, ctx).await?; - if downloaded_new_dataset { - let stacks_db = - open_readonly_stacks_db_conn_with_retry(&config.expected_cache_path(), 3, ctx)?; - let confirmed_tip = get_last_block_height_inserted(&stacks_db, ctx); - let mut canonical_fork: VecDeque<(BlockIdentifier, BlockIdentifier, u64)> = - get_canonical_fork_from_tsv(config, confirmed_tip, ctx).await?; - - let mut indexer = Indexer::new(config.network.clone()); - let mut blocks_inserted = 0; - let mut blocks_read = 0; - let blocks_to_insert = canonical_fork.len(); - let stacks_db_rw = open_readwrite_stacks_db_conn(&config.expected_cache_path(), ctx)?; - info!( - ctx.expect_logger(), - "Beginning import of {} Stacks blocks into rocks db", blocks_to_insert - ); - // TODO: To avoid repeating code with `scan_stacks_chainstate_via_csv_using_predicate`, we should move this block - // retrieval code into a reusable function. - let tsv_path = config.expected_local_stacks_tsv_file()?.clone(); - let mut tsv_reader = BufReader::new(File::open(tsv_path).map_err(|e| e.to_string())?); - let mut tsv_current_line = 0; - for (block_identifier, _parent_block_identifier, tsv_line_number) in - canonical_fork.drain(..) - { - blocks_read += 1; + // If blocks already stored, move on + if is_stacks_block_present(&block_identifier, 3, &stacks_db_rw) { + continue; + } + blocks_inserted += 1; - // If blocks already stored, move on - if is_stacks_block_present(&block_identifier, 3, &stacks_db_rw) { - continue; - } - blocks_inserted += 1; - - // Seek to required line from TSV and retrieve its block payload. 
- let mut tsv_line = String::new(); - while tsv_current_line < tsv_line_number { - tsv_line.clear(); - let bytes_read = tsv_reader - .read_line(&mut tsv_line) - .map_err(|e| e.to_string())?; - if bytes_read == 0 { - return Err("Unexpected EOF when reading TSV".to_string()); - } - tsv_current_line += 1; + // Seek to required line from TSV and retrieve its block payload. + let mut tsv_line = String::new(); + while tsv_current_line < tsv_line_number { + tsv_line.clear(); + let bytes_read = tsv_reader + .read_line(&mut tsv_line) + .map_err(|e| e.to_string())?; + if bytes_read == 0 { + return Err("Unexpected EOF when reading TSV".to_string()); } - let Some(serialized_block) = tsv_line.split('\t').last() else { - return Err("Unable to retrieve serialized block from TSV line".to_string()); - }; - - let block_data = match indexer::stacks::standardize_stacks_serialized_block( - &indexer.config, - serialized_block, - &mut indexer.stacks_context, - ctx, - ) { - Ok(block) => block, - Err(e) => { - error!( - &ctx.expect_logger(), - "Failed to standardize stacks block: {e}" - ); - continue; - } - }; - - // TODO(rafaelcr): Store signer messages - insert_entry_in_stacks_blocks(&block_data, &stacks_db_rw, ctx)?; + tsv_current_line += 1; + } + let Some(serialized_block) = tsv_line.split('\t').last() else { + return Err("Unable to retrieve serialized block from TSV line".to_string()); + }; - if blocks_inserted % 2500 == 0 { - info!( - ctx.expect_logger(), - "Importing Stacks blocks into rocks db: {}/{}", blocks_read, blocks_to_insert + let block_data = match indexer::stacks::standardize_stacks_serialized_block( + &indexer.config, + serialized_block, + &mut indexer.stacks_context, + ctx, + ) { + Ok(block) => block, + Err(e) => { + error!( + &ctx.expect_logger(), + "Failed to standardize stacks block: {e}" ); - let _ = stacks_db_rw.flush(); + continue; } + }; + + // TODO(rafaelcr): Store signer messages + insert_entry_in_stacks_blocks(&block_data, &stacks_db_rw, ctx)?; + + if 
blocks_inserted % 2500 == 0 { + info!( + ctx.expect_logger(), + "Importing Stacks blocks into rocks db: {}/{}", blocks_read, blocks_to_insert + ); + let _ = stacks_db_rw.flush(); } - let _ = stacks_db_rw.flush(); - info!( - ctx.expect_logger(), - "{blocks_read} Stacks blocks read, {blocks_inserted} inserted" - ); - } else { - info!( - ctx.expect_logger(), - "Skipping database consolidation - no new archive found since last consolidation." - ); } + let _ = stacks_db_rw.flush(); + info!( + ctx.expect_logger(), + "{blocks_read} Stacks blocks read, {blocks_inserted} inserted" + ); Ok(()) } diff --git a/components/chainhook-cli/src/service/mod.rs b/components/chainhook-cli/src/service/mod.rs index ea22ef976..a23e1bbfb 100644 --- a/components/chainhook-cli/src/service/mod.rs +++ b/components/chainhook-cli/src/service/mod.rs @@ -2,7 +2,6 @@ pub(crate) mod http_api; mod runloops; use crate::config::{Config, PredicatesApi, PredicatesApiConfig}; -use crate::scan::stacks::consolidate_local_stacks_chainstate_using_csv; use crate::service::http_api::{load_predicates_from_redis, start_predicate_api_server}; use crate::service::runloops::{start_bitcoin_scan_runloop, start_stacks_scan_runloop}; use crate::storage::signers::{initialize_signers_db, store_signer_db_messages}; @@ -164,11 +163,6 @@ impl Service { let mut event_observer_config = self.config.get_event_observer_config(); event_observer_config.registered_chainhooks = chainhook_store; - // Download and ingest a Stacks dump - if self.config.rely_on_remote_stacks_tsv() { - consolidate_local_stacks_chainstate_using_csv(&mut self.config, &self.ctx).await?; - } - // Stacks scan operation threadpool let (stacks_scan_op_tx, stacks_scan_op_rx) = crossbeam_channel::unbounded(); let ctx = self.ctx.clone(); @@ -292,8 +286,6 @@ impl Service { self.ctx.clone(), ); - let mut stacks_event = 0; - let ctx = self.ctx.clone(); match self.config.http_api { PredicatesApi::On(ref api_config) => { @@ -586,7 +578,6 @@ impl Service { match 
&chain_event { StacksChainEvent::ChainUpdatedWithBlocks(data) => { - stacks_event += 1; for confirmed_block in &data.confirmed_blocks { if let Some(expired_predicate_uuids) = expire_predicates_for_block( @@ -649,24 +640,6 @@ impl Service { &ctx, ); }; - - // Every 32 blocks, we will check if there's a new Stacks file archive to ingest - if stacks_event > 32 { - stacks_event = 0; - if self.config.rely_on_remote_stacks_tsv() { - if let Err(e) = consolidate_local_stacks_chainstate_using_csv( - &mut self.config, - &self.ctx, - ) - .await - { - error!( - self.ctx.expect_logger(), - "Failed to update database from archive: {e}" - ) - }; - } - } } ObserverEvent::PredicateInterrupted(PredicateInterruptedData { predicate_key, diff --git a/components/chainhook-cli/src/service/tests/helpers/mock_service.rs b/components/chainhook-cli/src/service/tests/helpers/mock_service.rs index e42e39d5a..5de156ad5 100644 --- a/components/chainhook-cli/src/service/tests/helpers/mock_service.rs +++ b/components/chainhook-cli/src/service/tests/helpers/mock_service.rs @@ -2,7 +2,7 @@ use crate::config::{ Config, EventSourceConfig, LimitsConfig, MonitoringConfig, PathConfig, PredicatesApi, PredicatesApiConfig, StorageConfig, DEFAULT_REDIS_URI, }; -use crate::scan::stacks::consolidate_local_stacks_chainstate_using_csv; +use crate::scan::stacks::import_stacks_chainstate_from_remote_tsv; use crate::service::{ http_api::start_predicate_api_server, update_predicate_spec, update_predicate_status, PredicateStatus, Service, @@ -442,7 +442,7 @@ pub async fn setup_stacks_chainhook_test( Some(prometheus_port), ); - consolidate_local_stacks_chainstate_using_csv(&mut config, &ctx) + import_stacks_chainstate_from_remote_tsv(&mut config, &ctx) .await .unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); diff --git a/components/chainhook-cli/src/service/tests/runloop_tests.rs b/components/chainhook-cli/src/service/tests/runloop_tests.rs index 43a930464..093f07f8d 100644 --- 
a/components/chainhook-cli/src/service/tests/runloop_tests.rs +++ b/components/chainhook-cli/src/service/tests/runloop_tests.rs @@ -12,7 +12,7 @@ use chainhook_sdk::{ use crate::{ config::{Config, EventSourceConfig, PathConfig}, - scan::stacks::consolidate_local_stacks_chainstate_using_csv, + scan::stacks::import_stacks_chainstate_from_remote_tsv, service::{ runloops::{ start_bitcoin_scan_runloop, start_stacks_scan_runloop, BitcoinScanOp, StacksScanOp, @@ -49,7 +49,7 @@ async fn test_stacks_runloop_kill_scan() { tracer: false, }; - consolidate_local_stacks_chainstate_using_csv(&mut config, &ctx) + import_stacks_chainstate_from_remote_tsv(&mut config, &ctx) .await .unwrap_or_else(|e| { std::fs::remove_dir_all(&working_dir).unwrap(); From 7b7cdf961d8fb114448842d2968e40a60587855e Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Tue, 12 Nov 2024 15:17:34 +0000 Subject: [PATCH 25/25] chore(release): 1.9.0-stacks-signers.8 [skip ci] ## [1.9.0-stacks-signers.8](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.7...v1.9.0-stacks-signers.8) (2024-11-12) ### Bug Fixes * only import stacks tsv if chainstate is empty ([#684](https://github.com/hirosystems/chainhook/issues/684)) ([1f9f37e](https://github.com/hirosystems/chainhook/commit/1f9f37ee8c9afa864fc618b761195b0d0711dfbe)), closes [#677](https://github.com/hirosystems/chainhook/issues/677) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fd264e1f..2ee9288f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [1.9.0-stacks-signers.8](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.7...v1.9.0-stacks-signers.8) (2024-11-12) + + +### Bug Fixes + +* only import stacks tsv if chainstate is empty ([#684](https://github.com/hirosystems/chainhook/issues/684)) ([1f9f37e](https://github.com/hirosystems/chainhook/commit/1f9f37ee8c9afa864fc618b761195b0d0711dfbe)), closes 
[#677](https://github.com/hirosystems/chainhook/issues/677) + ## [1.9.0-stacks-signers.7](https://github.com/hirosystems/chainhook/compare/v1.9.0-stacks-signers.6...v1.9.0-stacks-signers.7) (2024-11-07)