From 0f57007ab339dd64ba832432e6e4e74f3d9832da Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 6 Dec 2024 11:18:13 -0700 Subject: [PATCH] CLIENT-3171 CLIENT-3192 CLIENT-3202 CLIENT-3206 Support multi-record transactions (#140) --- .github/actions/run-ee-server/action.yml | 77 + .../wait-for-as-server-to-start/action.yml | 28 + .github/workflows/build-artifacts.yml | 70 + .../workflows/docker-build-context/Dockerfile | 39 + .../workflows/docker-build-context/roster.smd | 12 + .../docker-build-context/security.smd | 48 + .github/workflows/tests.yml | 58 + .../wait-for-as-server-to-start.bash | 47 + AerospikeClient/Admin/Role.cs | 10 +- AerospikeClient/Async/AsyncBatch.cs | 4088 ++++++------ AerospikeClient/Async/AsyncClient.cs | 269 +- AerospikeClient/Async/AsyncClientPolicy.cs | 2 +- AerospikeClient/Async/AsyncCluster.cs | 4 +- AerospikeClient/Async/AsyncCommand.cs | 26 +- AerospikeClient/Async/AsyncConnectionTls.cs | 2 +- AerospikeClient/Async/AsyncConnector.cs | 2 +- AerospikeClient/Async/AsyncDelete.cs | 216 +- AerospikeClient/Async/AsyncExecute.cs | 77 +- AerospikeClient/Async/AsyncExists.cs | 40 +- AerospikeClient/Async/AsyncMultiCommand.cs | 279 +- AerospikeClient/Async/AsyncOperateRead.cs | 41 + AerospikeClient/Async/AsyncOperateWrite.cs | 90 + .../Async/AsyncQueryPartitionExecutor.cs | 4 +- AerospikeClient/Async/AsyncRead.cs | 113 +- AerospikeClient/Async/AsyncReadBase.cs | 65 + AerospikeClient/Async/AsyncReadHeader.cs | 44 +- .../Async/AsyncScanPartitionExecutor.cs | 4 +- AerospikeClient/Async/AsyncSingleCommand.cs | 91 +- AerospikeClient/Async/AsyncTouch.cs | 262 +- .../{AsyncOperate.cs => AsyncTxnAddKeys.cs} | 66 +- AerospikeClient/Async/AsyncTxnClose.cs | 89 + .../Async/AsyncTxnMarkRollForward.cs | 87 + AerospikeClient/Async/AsyncTxnMonitor.cs | 202 + AerospikeClient/Async/AsyncTxnRoll.cs | 579 ++ AerospikeClient/Async/AsyncWrite.cs | 225 +- AerospikeClient/Async/AsyncWriteBase.cs | 80 + AerospikeClient/Async/IAsyncClient.cs | 74 +- 
.../AsyncTask/AbortListenerAdapter.cs | 33 + .../BatchOperateListListenerAdapter.cs | 2 +- .../AsyncTask/CommitListenerAdapter.cs | 38 + AerospikeClient/Cluster/Cluster.cs | 93 +- AerospikeClient/Cluster/ClusterStats.cs | 10 +- AerospikeClient/Cluster/ConnectionRecover.cs | 2 +- AerospikeClient/Cluster/Node.cs | 14 +- AerospikeClient/Cluster/NodeValidator.cs | 5 + AerospikeClient/Command/Batch.cs | 1499 +++-- AerospikeClient/Command/BatchAttr.cs | 27 + AerospikeClient/Command/BatchExecutor.cs | 337 +- AerospikeClient/Command/ByteUtil.cs | 57 +- AerospikeClient/Command/Command.cs | 5673 ++++++++++------- AerospikeClient/Command/DeleteCommand.cs | 167 +- AerospikeClient/Command/ExecuteCommand.cs | 76 +- AerospikeClient/Command/ExistsCommand.cs | 28 +- AerospikeClient/Command/FieldType.cs | 7 +- AerospikeClient/Command/MultiCommand.cs | 5 - AerospikeClient/Command/OperateArgs.cs | 15 +- AerospikeClient/Command/OperateCommandRead.cs | 35 + .../Command/OperateCommandWrite.cs | 61 + AerospikeClient/Command/ReadCommand.cs | 92 +- AerospikeClient/Command/ReadHeaderCommand.cs | 29 +- AerospikeClient/Command/ScanExecutor.cs | 2 +- AerospikeClient/Command/SyncCommand.cs | 881 +-- AerospikeClient/Command/SyncReadCommand.cs | 53 + ...{OperateCommand.cs => SyncWriteCommand.cs} | 51 +- AerospikeClient/Command/TouchCommand.cs | 180 +- AerospikeClient/Command/TxnAddKeys.cs | 50 + AerospikeClient/Command/TxnClose.cs | 52 + AerospikeClient/Command/TxnMarkRollForward.cs | 51 + AerospikeClient/Command/TxnMonitor.cs | 171 + AerospikeClient/Command/TxnRoll.cs | 342 + AerospikeClient/Command/WriteCommand.cs | 145 +- AerospikeClient/Exp/Exp.cs | 4 +- AerospikeClient/Listener/AbortListener.cs | 31 + AerospikeClient/Listener/CommitListener.cs | 37 + AerospikeClient/Main/AbortStatus.cs | 47 + AerospikeClient/Main/AerospikeClient.cs | 5174 ++++++++------- AerospikeClient/Main/AerospikeException.cs | 137 +- AerospikeClient/Main/BatchRecord.cs | 4 +- AerospikeClient/Main/CommitError.cs | 45 + 
AerospikeClient/Main/CommitStatus.cs | 49 + AerospikeClient/Main/IAerospikeClient.cs | 77 +- AerospikeClient/Main/Key.cs | 12 +- AerospikeClient/Main/ResultCode.cs | 74 +- AerospikeClient/Main/Txn.cs | 303 + AerospikeClient/Metrics/LatencyBuckets.cs | 2 +- AerospikeClient/Metrics/MetricsWriter.cs | 4 +- AerospikeClient/Policy/AdminPolicy.cs | 11 +- AerospikeClient/Policy/BatchDeletePolicy.cs | 15 +- AerospikeClient/Policy/BatchPolicy.cs | 17 +- AerospikeClient/Policy/BatchReadPolicy.cs | 9 + AerospikeClient/Policy/BatchUDFPolicy.cs | 13 +- AerospikeClient/Policy/BatchWritePolicy.cs | 15 +- AerospikeClient/Policy/ClientPolicy.cs | 43 +- AerospikeClient/Policy/CommitLevel.cs | 4 +- AerospikeClient/Policy/InfoPolicy.cs | 11 +- AerospikeClient/Policy/Policy.cs | 56 +- AerospikeClient/Policy/QueryPolicy.cs | 15 +- AerospikeClient/Policy/ScanPolicy.cs | 13 + AerospikeClient/Policy/TlsPolicy.cs | 9 + AerospikeClient/Policy/TxnRollPolicy.cs | 55 + AerospikeClient/Policy/TxnVerifyPolicy.cs | 56 + AerospikeClient/Policy/WritePolicy.cs | 15 +- AerospikeClient/Query/PartitionTracker.cs | 6 +- AerospikeClient/Query/QueryExecutor.cs | 2 +- .../Query/QueryListenerExecutor.cs | 2 +- .../Query/QueryPartitionExecutor.cs | 2 +- AerospikeClient/Query/RecordSet.cs | 4 +- AerospikeClient/Query/ResultSet.cs | 4 +- AerospikeClient/Util/ConcurrentHashMap.cs | 166 + AerospikeClient/Util/ConcurrentHashSet.cs | 138 + AerospikeDemo/AsyncTransaction.cs | 289 + AerospikeDemo/AsyncTransactionWithTask.cs | 100 + AerospikeDemo/DemoForm.cs | 3 + AerospikeDemo/Transaction.cs | 79 + AerospikeTest/AerospikeTest.csproj | 2 +- AerospikeTest/Args.cs | 433 +- AerospikeTest/Async/TestAsync.cs | 35 + AerospikeTest/Async/TestAsyncTxn.cs | 1155 ++++ AerospikeTest/Sync/Basic/TestBatch.cs | 982 +-- AerospikeTest/Sync/Basic/TestTxn.cs | 734 +++ AerospikeTest/settings.json | 35 +- 121 files changed, 18089 insertions(+), 10232 deletions(-) create mode 100644 .github/actions/run-ee-server/action.yml create mode 
100644 .github/actions/wait-for-as-server-to-start/action.yml create mode 100644 .github/workflows/build-artifacts.yml create mode 100644 .github/workflows/docker-build-context/Dockerfile create mode 100644 .github/workflows/docker-build-context/roster.smd create mode 100644 .github/workflows/docker-build-context/security.smd create mode 100644 .github/workflows/tests.yml create mode 100644 .github/workflows/wait-for-as-server-to-start.bash create mode 100644 AerospikeClient/Async/AsyncOperateRead.cs create mode 100644 AerospikeClient/Async/AsyncOperateWrite.cs create mode 100644 AerospikeClient/Async/AsyncReadBase.cs rename AerospikeClient/Async/{AsyncOperate.cs => AsyncTxnAddKeys.cs} (50%) create mode 100644 AerospikeClient/Async/AsyncTxnClose.cs create mode 100644 AerospikeClient/Async/AsyncTxnMarkRollForward.cs create mode 100644 AerospikeClient/Async/AsyncTxnMonitor.cs create mode 100644 AerospikeClient/Async/AsyncTxnRoll.cs create mode 100644 AerospikeClient/Async/AsyncWriteBase.cs create mode 100644 AerospikeClient/AsyncTask/AbortListenerAdapter.cs create mode 100644 AerospikeClient/AsyncTask/CommitListenerAdapter.cs create mode 100644 AerospikeClient/Command/OperateCommandRead.cs create mode 100644 AerospikeClient/Command/OperateCommandWrite.cs create mode 100644 AerospikeClient/Command/SyncReadCommand.cs rename AerospikeClient/Command/{OperateCommand.cs => SyncWriteCommand.cs} (51%) create mode 100644 AerospikeClient/Command/TxnAddKeys.cs create mode 100644 AerospikeClient/Command/TxnClose.cs create mode 100644 AerospikeClient/Command/TxnMarkRollForward.cs create mode 100644 AerospikeClient/Command/TxnMonitor.cs create mode 100644 AerospikeClient/Command/TxnRoll.cs create mode 100644 AerospikeClient/Listener/AbortListener.cs create mode 100644 AerospikeClient/Listener/CommitListener.cs create mode 100644 AerospikeClient/Main/AbortStatus.cs create mode 100644 AerospikeClient/Main/CommitError.cs create mode 100644 AerospikeClient/Main/CommitStatus.cs create 
mode 100644 AerospikeClient/Main/Txn.cs create mode 100644 AerospikeClient/Policy/TxnRollPolicy.cs create mode 100644 AerospikeClient/Policy/TxnVerifyPolicy.cs create mode 100644 AerospikeClient/Util/ConcurrentHashMap.cs create mode 100644 AerospikeClient/Util/ConcurrentHashSet.cs create mode 100644 AerospikeDemo/AsyncTransaction.cs create mode 100644 AerospikeDemo/AsyncTransactionWithTask.cs create mode 100644 AerospikeDemo/Transaction.cs create mode 100644 AerospikeTest/Async/TestAsyncTxn.cs create mode 100644 AerospikeTest/Sync/Basic/TestTxn.cs diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml new file mode 100644 index 00000000..e0790508 --- /dev/null +++ b/.github/actions/run-ee-server/action.yml @@ -0,0 +1,77 @@ +name: 'Run EE Server' +description: 'Run EE server. Returns once server is ready. Only tested on Linux and macOS' +# NOTE: do not share this server container with others +# since it's using the default admin / admin credentials +inputs: + # All inputs in composite actions are strings + use-server-rc: + required: true + description: Deploy server release candidate? 
+ default: 'false' + server-tag: + required: true + description: Specify Docker tag + default: 'latest' + # Github Composite Actions can't access secrets + # so we need to pass them in as inputs + docker-hub-username: + description: Required for using release candidates + required: false + docker-hub-password: + description: Required for using release candidates + required: false + +runs: + using: "composite" + steps: + - name: Log into Docker Hub to get server RC + if: ${{ inputs.use-server-rc == 'true' }} + run: docker login --username ${{ inputs.docker-hub-username }} --password ${{ inputs.docker-hub-password }} + shell: bash + + - run: echo IMAGE_NAME=aerospike/aerospike-server-enterprise${{ inputs.use-server-rc == 'true' && '-rc' || '' }}:${{ inputs.server-tag }} >> $GITHUB_ENV + shell: bash + + - run: echo NEW_IMAGE_NAME=${{ env.IMAGE_NAME }}-security-and-sc >> $GITHUB_ENV + shell: bash + + # macOS Github runners and Windows self-hosted runners don't have buildx installed by default + - if: ${{ runner.os == 'Windows' || runner.os == 'macOS' }} + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v6 + with: + # Don't want to use default Git context or else it will clone the whole client repo again + context: .github/workflows/docker-build-context + build-args: | + server_image=${{ env.IMAGE_NAME }} + tags: ${{ env.NEW_IMAGE_NAME }} + # setup-buildx-action configures Docker to use the docker-container build driver + # This driver doesn't publish an image locally by default + # so we have to manually enable it + load: true + + - run: echo SERVER_CONTAINER_NAME="aerospike" >> $GITHUB_ENV + shell: bash + + - run: docker run -d --name ${{ env.SERVER_CONTAINER_NAME }} -e DEFAULT_TTL=2592000 -p 3000:3000 ${{ env.NEW_IMAGE_NAME }} + shell: bash + + - uses: ./.github/actions/wait-for-as-server-to-start + with: + container-name: ${{ env.SERVER_CONTAINER_NAME }} + is-security-enabled: false + is-strong-consistency-enabled: 
true + + # All the partitions are assumed to be dead when reusing a roster file + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage revive ns test" + shell: bash + + # Apply changes + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage recluster" + shell: bash + + # For debugging + - run: docker logs aerospike + shell: bash diff --git a/.github/actions/wait-for-as-server-to-start/action.yml b/.github/actions/wait-for-as-server-to-start/action.yml new file mode 100644 index 00000000..373c2697 --- /dev/null +++ b/.github/actions/wait-for-as-server-to-start/action.yml @@ -0,0 +1,28 @@ +name: 'Wait for Aerospike server to start' +description: Only tested on Linux and macOS +inputs: + container-name: + required: true + is-security-enabled: + required: false + default: 'false' + is-strong-consistency-enabled: + required: false + default: 'false' + +runs: + using: "composite" + steps: + - name: 'macOS: install timeout command' + if: ${{ runner.os == 'macOS' }} + run: brew install coreutils + shell: bash + + # Composite actions doesn't support step-level timeout-minutes + # Use timeout command and store polling logic in file to make it easier to read + # Call bash shell explicitly since timeout uses "sh" shell by default, for some reason + # Also, we don't want to fail if we timeout in case the server *did* finish starting up but the script couldn't detect it due to a bug + # Effectively, this composite action is like calling "sleep" that is optimized to exit early when it detects an ok from the server + - name: Wait for EE server to start + run: timeout 30 bash ./.github/workflows/wait-for-as-server-to-start.bash ${{ inputs.container-name }} ${{ inputs.is-security-enabled }} ${{ inputs.is-strong-consistency-enabled }} || true + shell: bash diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml new file mode 100644 index 00000000..5ce0674d --- /dev/null +++ 
b/.github/workflows/build-artifacts.yml @@ -0,0 +1,70 @@ +name: Build artifacts +run-name: Build artifacts (run_tests=${{ inputs.run_tests }}, use-server-rc=${{ inputs.use-server-rc }}, server-tag=${{ inputs.server-tag }}) + +on: + workflow_dispatch: + inputs: + run_tests: + description: "Run integration tests?" + required: true + type: boolean + default: false + use-server-rc: + type: boolean + required: true + default: false + description: 'Test against server release candidate? (e.g to test new server features)' + server-tag: + type: string + required: true + default: 'latest' + description: 'Server docker image tag (e.g to test a client backport version)' + + workflow_call: + inputs: + # The "dev" tests test the artifacts against a server + run_tests: + required: false + type: boolean + default: false + # workflow_call hack + is_workflow_call: + type: boolean + default: true + required: false + # This input is only used in workflow_call events + sha-to-build-and-test: + description: A calling workflow may want to run this workflow on a different ref than the calling workflow's ref + type: string + # Make it required to make things simple + required: true + # A calling workflow doesn't actually set values to the inputs below + # But that workflow needs to have default values for these inputs + use-server-rc: + required: false + default: false + type: boolean + server-tag: + type: string + required: false + default: 'latest' + secrets: + DOCKER_HUB_BOT_USERNAME: + required: true + DOCKER_HUB_BOT_PW: + required: true + MAC_M1_SELF_HOSTED_RUNNER_PW: + required: true + +jobs: + dotnet: + strategy: + fail-fast: false + uses: ./.github/workflows/dotnet.yml + with: + # Can't use env context here, so just copy from build-sdist env var + sha-to-build-and-test: ${{ inputs.is_workflow_call == true && inputs.sha-to-build-and-test || github.sha }} + run_tests: ${{ inputs.run_tests }} + use-server-rc: ${{ inputs.use-server-rc }} + server-tag: ${{ inputs.server-tag }} + 
secrets: inherit diff --git a/.github/workflows/docker-build-context/Dockerfile b/.github/workflows/docker-build-context/Dockerfile new file mode 100644 index 00000000..5f20cb51 --- /dev/null +++ b/.github/workflows/docker-build-context/Dockerfile @@ -0,0 +1,39 @@ +ARG server_image=aerospike/aerospike-server-enterprise +ARG ROSTER_FILE_NAME=roster.smd +# Temp file for passing node id from one build stage to another +# Docker doesn't support command substitution for setting values for ARG variables, so we have to do this +ARG NODE_ID_FILE_NAME=node_id + +FROM $server_image as configure-server + +WORKDIR /opt/aerospike/smd + +# Enable authentication + +ARG AEROSPIKE_CONF_TEMPLATE_PATH=/etc/aerospike/aerospike.template.conf + +# Enable strong consistency +RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency true/" $AEROSPIKE_CONF_TEMPLATE_PATH +RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency-allow-expunge true/" $AEROSPIKE_CONF_TEMPLATE_PATH +ARG ROSTER_FILE_NAME +COPY $ROSTER_FILE_NAME . + +# Fetch node id from roster.smd + +# There's no tag for the latest major version to prevent breaking changes in jq +# This is the next best thing +FROM ghcr.io/jqlang/jq:1.7 as get-jq +# jq docker image doesn't have a shell +# We need a shell to fetch and pass the node id to the next build stage +FROM busybox as get-node-id +COPY --from=get-jq /jq /bin/ +ARG ROSTER_FILE_NAME +COPY $ROSTER_FILE_NAME . +ARG NODE_ID_FILE_NAME +RUN jq --raw-output '.[1].value' $ROSTER_FILE_NAME > $NODE_ID_FILE_NAME + +FROM configure-server as set-node-id +ARG NODE_ID_FILE_NAME +COPY --from=get-node-id $NODE_ID_FILE_NAME . 
+RUN sed -i "s/\(^service {\)/\1\n\tnode-id $(cat $NODE_ID_FILE_NAME)/" $AEROSPIKE_CONF_TEMPLATE_PATH +RUN rm $NODE_ID_FILE_NAME diff --git a/.github/workflows/docker-build-context/roster.smd b/.github/workflows/docker-build-context/roster.smd new file mode 100644 index 00000000..66daed5f --- /dev/null +++ b/.github/workflows/docker-build-context/roster.smd @@ -0,0 +1,12 @@ +[ + [ + 97107025374203, + 1 + ], + { + "key": "test", + "value": "a1", + "generation": 1, + "timestamp": 465602976982 + } +] diff --git a/.github/workflows/docker-build-context/security.smd b/.github/workflows/docker-build-context/security.smd new file mode 100644 index 00000000..9c530d51 --- /dev/null +++ b/.github/workflows/docker-build-context/security.smd @@ -0,0 +1,48 @@ +[ + [ + 162276881999406, + 14 + ], + { + "key": "admin|P", + "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoO1mVO/4MLpGzsqojz6E9Gef6iXDjXdDa", + "generation": 1, + "timestamp": 0 + }, + { + "key": "admin|R|user-admin", + "value": "", + "generation": 1, + "timestamp": 0 + }, + { + "key": "superuser|P", + "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoOZX0o4mZCBUwvzt/iecIcG4JaDOC41zK", + "generation": 3, + "timestamp": 458774922440 + }, + { + "key": "superuser|R|read-write-udf", + "value": "", + "generation": 3, + "timestamp": 458774922441 + }, + { + "key": "superuser|R|sys-admin", + "value": "", + "generation": 3, + "timestamp": 458774922442 + }, + { + "key": "superuser|R|user-admin", + "value": "", + "generation": 3, + "timestamp": 458774922442 + }, + { + "key": "superuser|R|data-admin", + "value": null, + "generation": 2, + "timestamp": 458774718056 + } +] diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..9cfaf6c7 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,58 @@ +name: Run tests + +# Trigger test workflow whenever: +# 1. A pull request is updated (e.g with new commits) +# 2. 
Commits are pushed directly to the stage or master branch +on: + push: + branches: ["stage*", "master*"] + pull_request: + branches: ["stage*", "master*"] + types: [ + # Default triggers + opened, + synchronize, + reopened, + # Additional triggers + labeled, + unlabeled + ] + workflow_dispatch: + inputs: + test-server-rc: + type: boolean + default: false + required: true + +jobs: + + test-ee: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - uses: ./.github/actions/run-ee-server + with: + use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} + docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} + docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} + + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: 6.0.x + + - name: Restore dependencies + run: dotnet restore /p:EnableWindowsTargeting=true + + - name: Build + run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true + + - name: Run tests + run: dotnet test --configuration Release --no-build --verbosity normal + + - name: Show logs if failed + if: ${{ failure() }} + run: | + docker container logs aerospike + cat ./configs/aerospike.conf diff --git a/.github/workflows/wait-for-as-server-to-start.bash b/.github/workflows/wait-for-as-server-to-start.bash new file mode 100644 index 00000000..c43e17da --- /dev/null +++ b/.github/workflows/wait-for-as-server-to-start.bash @@ -0,0 +1,47 @@ +#!/bin/bash + +set -x +# Makes sure that if the "docker exec" command fails, it is not ignored +set -o pipefail + +container_name=$1 +is_security_enabled=$2 + +if [[ $is_security_enabled == true ]]; then + # We need to pass credentials to asinfo if server requires it + # TODO: passing in credentials via command line flags since I can't figure out how to use --instance with global astools.conf + user_credentials="--user=admin --password=admin" +fi + +while true; do + # An unset variable will have a default empty 
value + # Intermediate step is to print docker exec command's output in case it fails + # Sometimes, errors only appear in stdout and not stderr, like if asinfo throws an error because of no credentials + # (This is a bug in asinfo since all error messages should be sent to stderr) + # But piping and passing stdin to grep will hide the first command's stdout. + # grep doesn't have a way to print all lines passed as input. + # ack does have an option but it doesn't come installed by default + # shellcheck disable=SC2086 # The flags in user credentials should be separate anyways. Not one string + echo "Checking if we can reach the server via the service port..." + if docker exec "$container_name" asinfo $user_credentials -v status | tee >(cat) | grep -qE "^ok"; then + # Server is ready when asinfo returns ok + echo "Can reach server now." + # docker container inspect "$container_name" + break + fi + + echo "Server didn't return ok via the service port. Polling again..." +done + +# Although the server may be reachable via the service port, the cluster may not be fully initialized yet. +# If we try to connect too soon (e.g right after "status" returns ok), the client may throw error code -1 +while true; do + echo "Waiting for server to stabilize (i.e return a cluster key)..." + # We assume that when an ERROR is returned, the cluster is not stable yet (i.e not fully initialized) + if docker exec "$container_name" asinfo $user_credentials -v cluster-stable 2>&1 | (! grep -qE "^ERROR"); then + echo "Server is in a stable state." + break + fi + + echo "Server did not return a cluster key. Polling again..." +done diff --git a/AerospikeClient/Admin/Role.cs b/AerospikeClient/Admin/Role.cs index feaa3130..2b8af1aa 100644 --- a/AerospikeClient/Admin/Role.cs +++ b/AerospikeClient/Admin/Role.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
under one or more contributor * license agreements. @@ -49,22 +49,22 @@ public sealed class Role public const string SIndexAdmin = "sindex-admin"; /// - /// Allow read transactions. + /// Allow read commands. /// public const string Read = "read"; /// - /// Allow read and write transactions. + /// Allow read and write commands. /// public const string ReadWrite = "read-write"; /// - /// Allow read and write transactions within user defined functions. + /// Allow read and write commands within user defined functions. /// public const string ReadWriteUdf = "read-write-udf"; /// - /// Allow write transactions. + /// Allow write commands. /// public const string Write = "write"; diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index 8f3a7b75..4d6056e3 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -1,1910 +1,2178 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - //------------------------------------------------------- - // ReadList - //------------------------------------------------------- - - public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor - { - private readonly BatchListListener listener; - private readonly List records; - - public AsyncBatchReadListExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchListListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records); - } - // Dispatch commands to nodes. - Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchReadListCommand : AsyncBatchCommand - { - private readonly List records; - - public AsyncBatchReadListCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.records = records; - } - - public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other) - { - this.records = other.records; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRead record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - 
} - else - { - record.SetError(resultCode, false); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchReadListCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // ReadSequence - //------------------------------------------------------- - - public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor - { - private readonly BatchSequenceListener listener; - - public AsyncBatchReadSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchSequenceListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records); - } - // Dispatch commands to nodes. 
- Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand - { - private readonly BatchSequenceListener listener; - private readonly List records; - - public AsyncBatchReadSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchSequenceListener listener, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.listener = listener; - this.records = records; - } - - public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other) - { - this.listener = other.listener; - this.records = other.records; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRead record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, false); - } - listener.OnRecord(record); - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchReadSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // GetArray - //------------------------------------------------------- - - public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor - 
{ - private readonly Key[] keys; - private readonly Record[] records; - private readonly RecordArrayListener listener; - - public AsyncBatchGetArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - RecordArrayListener listener, - Key[] keys, - string[] binNames, - Operation[] ops, - int readAttr, - bool isOperation - ) : base(cluster, false) - { - this.keys = keys; - this.records = new Record[keys.Length]; - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation); - } - // Dispatch commands to nodes. - Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(keys, records); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(new AerospikeException.BatchRecords(records, ae)); - } - } - - sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly Record[] records; - private readonly int readAttr; - - public AsyncBatchGetArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string[] binNames, - Operation[] ops, - Record[] records, - int readAttr, - bool isOperation - ) : base(parent, cluster, batch, batchPolicy, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.records = records; - this.readAttr = readAttr; - } - - public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other) - { - this.keys = other.keys; - this.binNames = other.binNames; - this.ops = 
other.ops; - this.records = other.records; - this.readAttr = other.readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - if (resultCode == 0) - { - records[batchIndex] = ParseRecord(); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchGetArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // GetSequence - //------------------------------------------------------- - - public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor - { - private readonly RecordSequenceListener listener; - - public AsyncBatchGetSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - RecordSequenceListener listener, - Key[] keys, - string[] binNames, - Operation[] ops, - int readAttr, - bool isOperation - ) : base(cluster, false) - { - this.listener = listener; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation); - } - // Dispatch commands to nodes. - Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly RecordSequenceListener listener; - private readonly int readAttr; - - public AsyncBatchGetSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string[] binNames, - Operation[] ops, - RecordSequenceListener listener, - int readAttr, - bool isOperation - ) : base(parent, cluster, batch, batchPolicy, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.listener = listener; - this.readAttr = readAttr; - } - - public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.binNames = other.binNames; - this.ops = other.ops; - this.listener = other.listener; - this.readAttr = other.readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - Key keyOrig = 
keys[batchIndex]; - - if (resultCode == 0) - { - Record record = ParseRecord(); - listener.OnRecord(keyOrig, record); - } - else - { - listener.OnRecord(keyOrig, null); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchGetSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // ExistsArray - //------------------------------------------------------- - - public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor - { - private readonly Key[] keys; - private readonly bool[] existsArray; - private readonly ExistsArrayListener listener; - - public AsyncBatchExistsArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - Key[] keys, - ExistsArrayListener listener - ) : base(cluster,false) - { - this.keys = keys; - this.existsArray = new bool[keys.Length]; - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray); - } - // Dispatch commands to nodes. 
- Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(keys, existsArray); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae)); - } - } - - sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly bool[] existsArray; - - public AsyncBatchExistsArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - bool[] existsArray - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.existsArray = existsArray; - } - - public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other) - { - this.keys = other.keys; - this.existsArray = other.existsArray; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - - existsArray[batchIndex] = resultCode == 0; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchExistsArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, batchPolicy, keys, existsArray); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - 
//------------------------------------------------------- - // ExistsSequence - //------------------------------------------------------- - - public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor - { - private readonly ExistsSequenceListener listener; - - public AsyncBatchExistsSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - Key[] keys, - ExistsSequenceListener listener - ) : base(cluster, false) - { - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener); - } - // Dispatch commands to nodes. - Execute(commands); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - - } - - sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly ExistsSequenceListener listener; - - public AsyncBatchExistsSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - ExistsSequenceListener listener - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.listener = listener; - } - - public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.listener = other.listener; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - 
SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - - Key keyOrig = keys[batchIndex]; - listener.OnExists(keyOrig, resultCode == 0); - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchExistsSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // OperateList - //------------------------------------------------------- - - public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor - { - internal readonly BatchOperateListListener listener; - internal readonly List records; - - public AsyncBatchOperateListExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchOperateListListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records); - } - // Dispatch commands to nodes. 
- Execute(tasks); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateListCommand : AsyncBatchCommand - { - internal readonly List records; - - public AsyncBatchOperateListCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.records = records; - } - - public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other) - { - this.records = other.records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, records, batch); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - parent.SetRowError(); - return; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = record.hasWrite; - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateListCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // OperateSequence - //------------------------------------------------------- - - public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - - public AsyncBatchOperateSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records); - } - // Dispatch commands to nodes. 
- Execute(tasks); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand - { - internal readonly BatchRecordSequenceListener listener; - internal readonly List records; - - public AsyncBatchOperateSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchRecordSequenceListener listener, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.listener = listener; - this.records = records; - } - - public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other) - { - this.listener = other.listener; - this.records = other.records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, records, batch); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - } - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - } - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - // Set inDoubt, but do not call OnRecord() because user already has access to full - // BatchRecord list and can examine each record for inDoubt when the exception occurs. - record.inDoubt = record.hasWrite; - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // OperateRecordArray - //------------------------------------------------------- - - public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - internal readonly BatchRecord[] records; - - public AsyncBatchOperateRecordArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, - Key[] keys, - Operation[] ops, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.records = new BatchRecord[keys.Length]; - - for (int i = 0; i < 
keys.Length; i++) - { - this.records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr); - } - // Dispatch commands to nodes. - Execute(tasks); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(records, ae); - } - } - - sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly Operation[] ops; - internal readonly BatchRecord[] records; - internal readonly BatchAttr attr; - - public AsyncBatchOperateRecordArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecord[] records, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, ops != null) - { - this.keys = keys; - this.ops = ops; - this.records = records; - this.attr = attr; - } - - public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other) - { - this.keys = other.keys; - this.ops = other.ops; - this.records = other.records; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, 
Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateRecordArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // OperateRecordSequence - //------------------------------------------------------- - - public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - private readonly bool[] sent; - - public AsyncBatchOperateRecordSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - Key[] keys, - Operation[] ops, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.sent = new bool[keys.Length]; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr); - } - // Dispatch commands to nodes. 
- Execute(tasks); - } - - public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly Operation[] ops; - internal readonly bool[] sent; - internal readonly BatchRecordSequenceListener listener; - internal readonly BatchAttr attr; - - public AsyncBatchOperateRecordSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - bool[] sent, - BatchRecordSequenceListener listener, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, ops != null) - { - this.keys = keys; - this.ops = ops; - this.sent = sent; - this.listener = listener; - this.attr = attr; - } - - public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.ops = other.ops; - this.sent = other.sent; - this.listener = other.listener; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - Key keyOrig = keys[batchIndex]; - BatchRecord record; - - if (resultCode == 0) - { - record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, 
Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - sent[batchIndex] = true; - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - // Set inDoubt for all unsent records, so the listener receives a full set of records. - foreach (int index in batch.offsets) - { - if (!sent[index]) - { - Key key = keys[index]; - BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateRecordSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // UDFArray - //------------------------------------------------------- - - public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - internal readonly BatchRecord[] recordArray; - - public AsyncBatchUDFArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.recordArray = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr); - } - // Dispatch commands to nodes. - Execute(tasks); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(recordArray, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(recordArray, ae); - } - } - - public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly string packageName; - internal readonly string functionName; - internal readonly byte[] argBytes; - internal readonly BatchRecord[] records; - internal readonly BatchAttr attr; - - public AsyncBatchUDFArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchRecord[] records, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.records = records; - this.attr = attr; - } - - public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other) - { - this.keys = other.keys; - this.packageName = other.packageName; - this.functionName = other.functionName; - this.argBytes = other.argBytes; - this.records = other.records; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - protected internal 
override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. - record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); - parent.SetRowError(); - return; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchUDFArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // UDFSequence - //------------------------------------------------------- - - public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - private readonly bool[] sent; - - public AsyncBatchUDFSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - Key[] keys, - string packageName, - string functionName, - byte[] 
argBytes, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.sent = new bool[keys.Length]; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr); - } - // Dispatch commands to nodes. - Execute(tasks); - } - - public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly string packageName; - internal readonly string functionName; - internal readonly byte[] argBytes; - internal readonly bool[] sent; - internal readonly BatchRecordSequenceListener listener; - internal readonly BatchAttr attr; - - public AsyncBatchUDFSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - bool[] sent, - BatchRecordSequenceListener listener, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.sent = sent; - this.listener = listener; - this.attr = attr; - } - - public 
AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.packageName = other.packageName; - this.functionName = other.functionName; - this.argBytes = other.argBytes; - this.sent = other.sent; - this.listener = other.listener; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - Key keyOrig = keys[batchIndex]; - BatchRecord record; - - if (resultCode == 0) - { - record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. - record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - sent[batchIndex] = true; - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - // Set inDoubt for all unsent records, so the listener receives a full set of records. 
- foreach (int index in batch.offsets) - { - if (!sent[index]) - { - Key key = keys[index]; - BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchUDFSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // Batch Base Executor - //------------------------------------------------------- - - public abstract class AsyncBatchExecutor : IBatchStatus - { - private AerospikeException exception; - private int max; - private int count; - private readonly bool hasResultCode; - private bool error; - - public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode) - { - this.hasResultCode = hasResultCode; - cluster.AddTran(); - } - - public void Execute(AsyncBatchCommand[] commands) - { - max = commands.Length; - - foreach (AsyncBatchCommand command in commands) - { - command.Execute(); - } - } - - public void Retry(AsyncMultiCommand[] commands) - { - lock (this) - { - // Adjust max for new commands minus failed command. 
- max += commands.Length - 1; - } - - foreach (AsyncBatchCommand command in commands) - { - command.ExecuteBatchRetry(); - } - } - - public void ChildSuccess(AsyncNode node) - { - bool complete; - - lock (this) - { - complete = ++count == max; - } - - if (complete) - { - Finish(); - } - } - - public void ChildFailure(AerospikeException ae) - { - bool complete; - - lock (this) - { - if (exception == null) - { - exception = ae; - } - complete = ++count == max; - } - - if (complete) - { - Finish(); - } - } - - private void Finish() - { - if (exception == null) - { - OnSuccess(); - } - else - { - OnFailure(exception); - } - } - - public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - // Only used in executors with sequence listeners. - // These executors will override this method. - } - - public void BatchKeyError(AerospikeException ae) - { - error = true; - - if (!hasResultCode) - { - // Legacy batch read commands that do not store a key specific resultCode. - // Store exception which will be passed to the listener on batch completion. - if (exception == null) - { - exception = ae; - } - } - } - - public void SetRowError() - { - // Indicate that a key specific error occurred. 
- error = true; - } - - public bool GetStatus() - { - return !error; - } - - protected internal abstract void OnSuccess(); - protected internal abstract void OnFailure(AerospikeException ae); - } - - //------------------------------------------------------- - // Batch Base Command - //------------------------------------------------------- - - public abstract class AsyncBatchCommand : AsyncMultiCommand - { - internal readonly AsyncBatchExecutor parent; - internal readonly BatchNode batch; - internal readonly BatchPolicy batchPolicy; - internal uint sequenceAP; - internal uint sequenceSC; - - public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation) - : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation) - { - this.parent = parent; - this.batch = batch; - this.batchPolicy = batchPolicy; - } - - public AsyncBatchCommand(AsyncBatchCommand other) : base(other) - { - this.parent = other.parent; - this.batch = other.batch; - this.batchPolicy = other.batchPolicy; - this.sequenceAP = other.sequenceAP; - this.sequenceSC = other.sequenceSC; - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.BATCH; - } - - protected internal override bool PrepareRetry(bool timeout) - { - if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) - { - // Perform regular retry to same node. - return true; - } - - sequenceAP++; - - if (! timeout || policy.readModeSC != ReadModeSC.LINEARIZE) { - sequenceSC++; - } - return false; - } - - protected internal override bool RetryBatch() - { - List batchNodes = null; - - try - { - // Retry requires keys for this node to be split among other nodes. - // This can cause an exponential number of commands. - batchNodes = GenerateBatchNodes(); - - if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) - { - // Batch node is the same. Go through normal retry. 
- // Normal retries reuse eventArgs, so PutBackArgsOnError() - // should not be called here. - return false; - } - - cluster.AddRetries(batchNodes.Count); - } - catch (Exception) - { - // Close original command. - base.ReleaseBuffer(); - throw; - } - - // Close original command. - base.ReleaseBuffer(); - - // Execute new commands. - AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - AsyncBatchCommand cmd = CreateCommand(batchNode); - cmd.sequenceAP = sequenceAP; - cmd.sequenceSC = sequenceSC; - cmd.SetBatchRetry(this); - cmds[count++] = cmd; - } - - // Retry new commands. - parent.Retry(cmds); - - // Return true so original batch command is stopped. - return true; - } - - protected internal override void OnSuccess() - { - parent.ChildSuccess(node); - } - - protected internal override void OnFailure(AerospikeException e) - { - SetInDoubt(e.InDoubt); - parent.ChildFailure(e); - } - - internal virtual void SetInDoubt(bool inDoubt) - { - // Do nothing by default. Batch writes will override this method. - } - - internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode); - internal abstract List GenerateBatchNodes(); - } - - internal class AsyncBatch - { - internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index) - { - try - { - listener.OnRecord(record, index); - } - catch (Exception e) - { - Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e)); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + //------------------------------------------------------- + // ReadList + //------------------------------------------------------- + + public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor + { + private readonly BatchListListener listener; + private readonly List records; + + public AsyncBatchReadListExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchListListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchReadListCommand : AsyncBatchCommand + { + private readonly List records; + + public AsyncBatchReadListCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.records = records; + } + + public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other) + { + this.records = other.records; + } 
+ + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override void ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchReadListCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // ReadSequence + //------------------------------------------------------- + + public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor + { + private readonly BatchSequenceListener listener; + + public AsyncBatchReadSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchSequenceListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand + { + private readonly BatchSequenceListener listener; + private readonly List records; + + public AsyncBatchReadSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchSequenceListener listener, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.listener = listener; + this.records = records; + } + + public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other) + { + this.listener = other.listener; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override void ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + } + listener.OnRecord(record); + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchReadSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); + } + + internal override 
List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // GetArray + //------------------------------------------------------- + + public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor + { + private readonly Key[] keys; + private readonly Record[] records; + private readonly RecordArrayListener listener; + + public AsyncBatchGetArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + RecordArrayListener listener, + Key[] keys, + string[] binNames, + Operation[] ops, + int readAttr, + bool isOperation + ) : base(cluster, false) + { + this.keys = keys; + this.records = new Record[keys.Length]; + this.listener = listener; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(keys, records); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(new AerospikeException.BatchRecords(records, ae)); + } + } + + sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly Record[] records; + private readonly int readAttr; + + public AsyncBatchGetArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string[] binNames, + Operation[] ops, + Record[] records, + int readAttr, + bool isOperation + ) : base(parent, cluster, 
batch, batchPolicy, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.records = records; + this.readAttr = readAttr; + } + + public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other) + { + this.keys = other.keys; + this.binNames = other.binNames; + this.ops = other.ops; + this.records = other.records; + this.readAttr = other.readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override void ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + + if (resultCode == 0) + { + records[batchIndex] = ParseRecord(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchGetArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // GetSequence + //------------------------------------------------------- + + public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor + { + private readonly RecordSequenceListener listener; + + public AsyncBatchGetSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + RecordSequenceListener listener, + Key[] keys, + string[] binNames, + Operation[] ops, + int readAttr, + bool isOperation + ) : base(cluster, false) + { + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly RecordSequenceListener listener; + private readonly int readAttr; + + public AsyncBatchGetSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string[] binNames, + Operation[] ops, + RecordSequenceListener listener, + int readAttr, + bool isOperation + ) : base(parent, cluster, batch, batchPolicy, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.listener = listener; + this.readAttr = readAttr; + } + + public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.binNames = other.binNames; + this.ops = other.ops; + this.listener = other.listener; + this.readAttr = other.readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFieldsRead(keyOrig); + + if (resultCode 
== 0) + { + Record record = ParseRecord(); + listener.OnRecord(keyOrig, record); + } + else + { + listener.OnRecord(keyOrig, null); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchGetSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // ExistsArray + //------------------------------------------------------- + + public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor + { + private readonly Key[] keys; + private readonly bool[] existsArray; + private readonly ExistsArrayListener listener; + + public AsyncBatchExistsArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + Key[] keys, + ExistsArrayListener listener + ) : base(cluster,false) + { + this.keys = keys; + this.existsArray = new bool[keys.Length]; + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(keys, existsArray); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae)); + } + } + + sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly bool[] existsArray; + + public AsyncBatchExistsArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + bool[] existsArray + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.existsArray = existsArray; + } + + public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other) + { + this.keys = other.keys; + this.existsArray = other.existsArray; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal override void ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + existsArray[batchIndex] = resultCode == 0; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchExistsArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, 
batchPolicy, keys, existsArray); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // ExistsSequence + //------------------------------------------------------- + + public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor + { + private readonly ExistsSequenceListener listener; + + public AsyncBatchExistsSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + Key[] keys, + ExistsSequenceListener listener + ) : base(cluster, false) + { + this.listener = listener; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + + } + + sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly ExistsSequenceListener listener; + + public AsyncBatchExistsSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + ExistsSequenceListener listener + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.listener = listener; + } + + public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.listener = other.listener; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr 
attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + ParseFieldsRead(keyOrig); + listener.OnExists(keyOrig, resultCode == 0); + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchExistsSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // OperateList + //------------------------------------------------------- + + public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor + { + internal readonly BatchOperateListListener listener; + internal readonly List records; + + public AsyncBatchOperateListExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchOperateListListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateListCommand : AsyncBatchCommand + { + internal readonly List records; + + public AsyncBatchOperateListCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.records = records; + } + + public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other) + { + this.records = other.records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, records, batch); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + parent.SetRowError(); + return; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateListCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // OperateSequence + //------------------------------------------------------- + + public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + + public AsyncBatchOperateSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand + { + internal readonly BatchRecordSequenceListener listener; + internal readonly List records; + + public AsyncBatchOperateSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchRecordSequenceListener listener, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.listener = listener; + this.records = records; + } + + public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other) + { + this.listener = other.listener; + this.records = other.records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, records, batch); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + } + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + } + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + // Set inDoubt, but do not call OnRecord() because user already has access to full + // BatchRecord list and can examine each record for inDoubt when the exception occurs. + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // OperateRecordArray + //------------------------------------------------------- + + public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + internal readonly BatchRecord[] records; + + public AsyncBatchOperateRecordArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Key[] keys, + Operation[] ops, + BatchAttr attr + ) : base(cluster, true) + { + this.listener 
= listener; + this.records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + this.records[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly Operation[] ops; + internal readonly BatchRecord[] records; + internal readonly BatchAttr attr; + + public AsyncBatchOperateRecordArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, ops != null) + { + this.keys = keys; + this.ops = ops; + this.records = records; + this.attr = attr; + } + + public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other) + { + this.keys = other.keys; + this.ops = other.ops; + this.records = other.records; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + 
record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt || !attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateRecordArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // OperateRecordSequence + //------------------------------------------------------- + + public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + private readonly bool[] sent; + + public AsyncBatchOperateRecordSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + Key[] keys, + Operation[] ops, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.sent = new bool[keys.Length]; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr); + } + this.commands = tasks; + } + + public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); + sent[index] = true; + AsyncBatch.OnRecord(cluster, listener, record, index); + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly Operation[] ops; + internal readonly bool[] sent; + internal readonly BatchRecordSequenceListener listener; + internal readonly BatchAttr attr; + + public AsyncBatchOperateRecordSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + bool[] sent, + BatchRecordSequenceListener listener, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, ops != null) + { + this.keys = keys; + this.ops = ops; + this.sent = sent; + this.listener = listener; + this.attr = attr; + } + + public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.ops = other.ops; + this.sent = other.sent; + this.listener = other.listener; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, 
keys, batch, null, ops, attr); + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + + BatchRecord record; + + if (resultCode == 0) + { + record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + sent[batchIndex] = true; + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + // Set inDoubt for all unsent records, so the listener receives a full set of records. + foreach (int index in batch.offsets) + { + if (!sent[index]) + { + Key key = keys[index]; + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(key); + } + + AsyncBatch.OnRecord(cluster, listener, record, index); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateRecordSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // UDFArray + //------------------------------------------------------- + + public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + internal readonly BatchRecord[] recordArray; + + public AsyncBatchUDFArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, 
+ Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.recordArray = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(recordArray, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(recordArray, ae); + } + } + + public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly string packageName; + internal readonly string functionName; + internal readonly byte[] argBytes; + internal readonly BatchRecord[] records; + internal readonly BatchAttr attr; + + public AsyncBatchUDFArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.records = records; + this.attr = attr; + } + + public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other) + { + this.keys = other.keys; + this.packageName = other.packageName; + this.functionName = other.functionName; + this.argBytes = 
other.argBytes; + this.records = other.records; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. + record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); + parent.SetRowError(); + return; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt || !attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchUDFArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // UDFSequence + 
//------------------------------------------------------- + + public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + private readonly bool[] sent; + + public AsyncBatchUDFSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.sent = new bool[keys.Length]; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr); + } + this.commands = tasks; + } + + public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); + sent[index] = true; + AsyncBatch.OnRecord(cluster, listener, record, index); + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly string packageName; + internal readonly string functionName; + internal readonly byte[] argBytes; + internal readonly bool[] sent; + internal readonly BatchRecordSequenceListener listener; + internal readonly BatchAttr attr; + + public AsyncBatchUDFSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string 
functionName, + byte[] argBytes, + bool[] sent, + BatchRecordSequenceListener listener, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.sent = sent; + this.listener = listener; + this.attr = attr; + } + + public AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.packageName = other.packageName; + this.functionName = other.functionName; + this.argBytes = other.argBytes; + this.sent = other.sent; + this.listener = other.listener; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + + BatchRecord record; + + if (resultCode == 0) + { + record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); + } + else if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + sent[batchIndex] = true; + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + // Set inDoubt for all unsent records, so the listener receives a full set of records. + foreach (int index in batch.offsets) + { + if (!sent[index]) + { + Key key = keys[index]; + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + + AsyncBatch.OnRecord(cluster, listener, record, index); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchUDFSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class AsyncBatchTxnVerifyExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnVerifyExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, 
+ Key[] keys, + long?[] versions, + BatchRecord[] records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnVerifyCommand(this, cluster, batchNode, policy, keys, versions, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnVerifyCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly long?[] versions; + private readonly BatchRecord[] records; + + public AsyncBatchTxnVerifyCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + long?[] versions, + BatchRecord[] records + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.versions = versions; + this.records = records; + } + + public AsyncBatchTxnVerifyCommand(AsyncBatchTxnVerifyCommand other) : base(other) + { + this.keys = other.keys; + this.versions = other.versions; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + protected internal override void ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + parent.SetRowError(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchTxnVerifyCommand(this); + } + + internal override 
AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchTxnVerifyCommand(parent, cluster, batchNode, batchPolicy, keys, versions, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + public sealed class AsyncBatchTxnRollExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnRollExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, true, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnRollCommand(this, cluster, batchNode, policy, txn, keys, records, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnRollCommand : AsyncBatchCommand + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public AsyncBatchTxnRollCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + public 
AsyncBatchTxnRollCommand(AsyncBatchTxnRollCommand other) : base(other) + { + this.txn = other.txn; + this.keys = other.keys; + this.attr = other.attr; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr); + } + + protected internal override void ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchTxnRollCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchTxnRollCommand(parent, cluster, batchNode, batchPolicy, txn, keys, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent); + } + } + + //------------------------------------------------------- + // Batch Base Executor + //------------------------------------------------------- + + public abstract class AsyncBatchExecutor : IBatchStatus + { + private AerospikeException exception; + private int max; + private int count; + private readonly bool hasResultCode; + private bool error; + public AsyncBatchCommand[] commands; + public AsyncCluster cluster; + + public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode) + { + this.hasResultCode = hasResultCode; + this.cluster = cluster; + cluster.AddCommandCount(); + } + + public void Execute() + { + Execute(commands); + } + + public void Execute(AsyncBatchCommand[] commands) + { + max = commands.Length; + + foreach (AsyncBatchCommand command in commands) + { + command.Execute(); + } + } + + public void Retry(AsyncMultiCommand[] commands) + { + lock (this) + { + // Adjust max for new 
commands minus failed command. + max += commands.Length - 1; + } + + foreach (AsyncBatchCommand command in commands.Cast()) + { + command.ExecuteBatchRetry(); + } + } + + public void ChildSuccess(AsyncNode node) + { + bool complete; + + lock (this) + { + complete = ++count == max; + } + + if (complete) + { + Finish(); + } + } + + public void ChildFailure(AerospikeException ae) + { + bool complete; + + lock (this) + { + if (exception == null) + { + exception = ae; + } + complete = ++count == max; + } + + if (complete) + { + Finish(); + } + } + + private void Finish() + { + if (exception == null) + { + OnSuccess(); + } + else + { + OnFailure(exception); + } + } + + public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + // Only used in executors with sequence listeners. + // These executors will override this method. + } + + public void BatchKeyError(AerospikeException ae) + { + error = true; + + if (!hasResultCode) + { + // Legacy batch read commands that do not store a key specific resultCode. + // Store exception which will be passed to the listener on batch completion. + if (exception == null) + { + exception = ae; + } + } + } + + public void SetRowError() + { + // Indicate that a key specific error occurred. 
+ error = true; + } + + public bool GetStatus() + { + return !error; + } + + protected internal abstract void OnSuccess(); + protected internal abstract void OnFailure(AerospikeException ae); + } + + //------------------------------------------------------- + // Batch Base Command + //------------------------------------------------------- + + public abstract class AsyncBatchCommand : AsyncMultiCommand + { + internal readonly AsyncBatchExecutor parent; + internal readonly BatchNode batch; + internal readonly BatchPolicy batchPolicy; + internal uint sequenceAP; + internal uint sequenceSC; + + public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation) + : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation) + { + this.parent = parent; + this.batch = batch; + this.batchPolicy = batchPolicy; + } + + public AsyncBatchCommand(AsyncBatchCommand other) : base(other) + { + this.parent = other.parent; + this.batch = other.batch; + this.batchPolicy = other.batchPolicy; + this.sequenceAP = other.sequenceAP; + this.sequenceSC = other.sequenceSC; + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.BATCH; + } + + protected void ParseFieldsRead(Key key) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + policy.Txn.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(Key key, bool hasWrite) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + + if (hasWrite) + { + policy.Txn.OnWrite(key, version, resultCode); + } + else + { + policy.Txn.OnRead(key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + + protected internal override bool PrepareRetry(bool timeout) + { + if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) + { + // Perform regular retry to same node. + return true; + } + + sequenceAP++; + + if (! 
timeout || policy.readModeSC != ReadModeSC.LINEARIZE) { + sequenceSC++; + } + return false; + } + + protected internal override bool RetryBatch() + { + List batchNodes = null; + + try + { + // Retry requires keys for this node to be split among other nodes. + // This can cause an exponential number of commands. + batchNodes = GenerateBatchNodes(); + + if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) + { + // Batch node is the same. Go through normal retry. + // Normal retries reuse eventArgs, so PutBackArgsOnError() + // should not be called here. + return false; + } + + cluster.AddRetries(batchNodes.Count); + } + catch (Exception) + { + // Close original command. + base.ReleaseBuffer(); + throw; + } + + // Close original command. + base.ReleaseBuffer(); + + // Execute new commands. + AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + AsyncBatchCommand cmd = CreateCommand(batchNode); + cmd.sequenceAP = sequenceAP; + cmd.sequenceSC = sequenceSC; + cmd.SetBatchRetry(this); + cmds[count++] = cmd; + } + + // Retry new commands. + parent.Retry(cmds); + + // Return true so original batch command is stopped. + return true; + } + + protected internal override void OnSuccess() + { + parent.ChildSuccess(node); + } + + protected internal override void OnFailure(AerospikeException e) + { + SetInDoubt(e.InDoubt); + parent.ChildFailure(e); + } + + internal virtual void SetInDoubt(bool inDoubt) + { + // Do nothing by default. Batch writes will override this method. 
+ } + + internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode); + internal abstract List GenerateBatchNodes(); + } + + internal class AsyncBatch + { + internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index) + { + try + { + listener.OnRecord(record, index); + } + catch (Exception e) + { + Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e)); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index a60e2b70..e1647a5b 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -15,6 +15,8 @@ * the License. */ +using static Aerospike.Client.AsyncQueryValidate; + namespace Aerospike.Client { /// @@ -57,7 +59,7 @@ public class AsyncClient : AerospikeClient, IAsyncClient /// /// /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails, the cluster will remain in a disconnected state + /// If the connection fails, the cluster will remain in a disconnected state /// until the server is activated. /// /// @@ -81,7 +83,7 @@ public AsyncClient(string hostname, int port) /// /// If the connection succeeds, the client is ready to process database requests. /// If the connection fails and the policy's failOnInvalidHosts is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state /// until the server is activated. /// /// @@ -110,7 +112,7 @@ public AsyncClient(AsyncClientPolicy policy, string hostname, int port) /// /// If one connection succeeds, the client is ready to process database requests. /// If all connections fail and the policy's failIfNotConnected is true, a connection - /// exception will be thrown. 
Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// until the server is activated. /// /// @@ -128,6 +130,112 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) base.cluster = this.cluster; } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Asynchronously attempt to commit the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Commit(Txn txn, CancellationToken token) + { + var listener = new CommitListenerAdapter(token); + Commit(listener, txn); + return listener.Task; + } + + /// + /// Asynchronously attempt to commit the given multi-record transaction. First, the expected + /// record versions are sent to the server nodes for verification. If all nodes return success, + /// the transaction is committed. Otherwise, the transaction is aborted. + /// + /// Schedules the commit command with a channel selector and return. + /// Another thread will process the command and send the results to the listener. 
+ /// + /// + /// Requires server version 8.0+ + /// + /// + /// where to send results + /// multi-record transaction + public void Commit(CommitListener listener, Txn txn) + { + AsyncTxnRoll atr = new( + cluster, txnVerifyPolicyDefault, txnRollPolicyDefault, txn + ); + + switch (txn.State) + { + default: + case Txn.TxnState.OPEN: + atr.Verify(listener); + break; + + case Txn.TxnState.VERIFIED: + atr.Commit(listener); + break; + + case Txn.TxnState.COMMITTED: + listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_COMMITTED); + break; + + case Txn.TxnState.ABORTED: + listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_ABORTED); + break; + + } + } + + /// + /// Asynchronously attempt to abort and rollback the given multi-record transaction. + /// Create listener, call asynchronous abort and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Abort(Txn txn, CancellationToken token) + { + var listener = new AbortListenerAdapter(token); + Abort(listener, txn); + return listener.Task; + } + + + /// + /// Asynchronously abort and rollback the given multi-record transaction. + /// + /// Schedules the abort command with a channel selector and return. + /// Another thread will process the command and send the results to the listener. 
+ /// + /// Requires server version 8.0+ + /// + /// + /// where to send results + /// multi-record transaction + public void Abort(AbortListener listener, Txn txn) + { + AsyncTxnRoll atr = new(cluster, null, txnRollPolicyDefault, txn); + + switch (txn.State) + { + default: + case Txn.TxnState.OPEN: + case Txn.TxnState.VERIFIED: + atr.Abort(listener); + break; + + case Txn.TxnState.COMMITTED: + listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_COMMITTED); + break; + + case Txn.TxnState.ABORTED: + listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_ABORTED); + break; + } + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -136,7 +244,7 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) /// Asynchronously write record bin(s). /// Create listener, call asynchronous put and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -151,13 +259,13 @@ public Task Put(WritePolicy policy, CancellationToken token, Key key, params Bin Put(policy, listener, key, bins); return listener.Task; } - + /// /// Asynchronously write record bin(s). /// Schedules the put command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. 
/// /// @@ -173,7 +281,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[ policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.WRITE); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -184,7 +292,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[ /// Asynchronously append bin string values to existing record bin values. /// Create listener, call asynchronous append and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -206,7 +314,7 @@ public Task Append(WritePolicy policy, CancellationToken token, Key key, params /// Schedule the append command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -223,14 +331,14 @@ public void Append(WritePolicy policy, WriteListener listener, Key key, params B policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.APPEND); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } /// /// Asynchronously prepend bin string values to existing record bin values. /// Create listener, call asynchronous prepend and return task monitor. 
/// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -252,7 +360,7 @@ public Task Prepend(WritePolicy policy, CancellationToken token, Key key, params /// Schedule the prepend command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -269,7 +377,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.PREPEND); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -280,7 +388,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params /// Asynchronously add integer/double bin values to existing record bin values. /// Create listener, call asynchronous add and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -301,7 +409,7 @@ public Task Add(WritePolicy policy, CancellationToken token, Key key, params Bin /// Schedule the add command with a channel selector and return. /// Another thread will process the command and send the results to the listener. 
/// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -317,7 +425,7 @@ public void Add(WritePolicy policy, WriteListener listener, Key key, params Bin[ policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.ADD); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -355,7 +463,7 @@ public void Delete(WritePolicy policy, DeleteListener listener, Key key) policy = writePolicyDefault; } AsyncDelete async = new AsyncDelete(cluster, policy, key, listener); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } /// @@ -411,7 +519,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Batc BatchAttr attr = new BatchAttr(); attr.SetDelete(deletePolicy); - new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, null, attr); + AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -451,7 +560,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Batc BatchAttr attr = new BatchAttr(); attr.SetDelete(deletePolicy); - new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, null, attr); + AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- @@ -491,7 +601,7 @@ public void Touch(WritePolicy policy, WriteListener listener, Key key) policy = writePolicyDefault; } AsyncTouch async = new AsyncTouch(cluster, policy, listener, key); - async.Execute(); + 
AsyncTxnMonitor.Execute(cluster, policy, async); } /// @@ -531,7 +641,7 @@ public void Touched(WritePolicy policy, ExistsListener listener, Key key) policy = writePolicyDefault; } AsyncTouch async = new(cluster, policy, listener, key); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -568,6 +678,9 @@ public void Exists(Policy policy, ExistsListener listener, Key key) { policy = readPolicyDefault; } + + policy.Txn?.PrepareRead(key.ns); + AsyncExists async = new AsyncExists(cluster, policy, key, listener); async.Execute(); } @@ -607,7 +720,10 @@ public void Exists(BatchPolicy policy, ExistsArrayListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchExistsArrayExecutor(cluster, policy, keys, listener); + policy.Txn?.PrepareRead(keys); + + AsyncBatchExistsArrayExecutor executor = new(cluster, policy, keys, listener); + executor.Execute(); } /// @@ -630,7 +746,10 @@ public void Exists(BatchPolicy policy, ExistsSequenceListener listener, Key[] ke { policy = batchPolicyDefault; } - new AsyncBatchExistsSequenceExecutor(cluster, policy, keys, listener); + policy.Txn?.PrepareRead(keys); + + AsyncBatchExistsSequenceExecutor executor = new(cluster, policy, keys, listener); + executor.Execute(); } //------------------------------------------------------- @@ -667,6 +786,9 @@ public void Get(Policy policy, RecordListener listener, Key key) { policy = readPolicyDefault; } + + policy.Txn?.PrepareRead(key.ns); + AsyncRead async = new AsyncRead(cluster, policy, listener, key, (string[])null); async.Execute(); } @@ -703,6 +825,9 @@ public void Get(Policy policy, RecordListener listener, Key key, params string[] { policy = readPolicyDefault; } + + policy.Txn?.PrepareRead(key.ns); + AsyncRead async = new AsyncRead(cluster, policy, listener, key, binNames); async.Execute(); } @@ -737,6 +862,9 @@ public void GetHeader(Policy policy, RecordListener listener, Key key) { policy = 
readPolicyDefault; } + + policy.Txn?.PrepareRead(key.ns); + AsyncReadHeader async = new AsyncReadHeader(cluster, policy, listener, key); async.Execute(); } @@ -789,7 +917,10 @@ public void Get(BatchPolicy policy, BatchListListener listener, List { policy = batchPolicyDefault; } - new AsyncBatchReadListExecutor(cluster, policy, listener, records); + policy.Txn?.PrepareRead(records); + + AsyncBatchReadListExecutor executor = new(cluster, policy, listener, records); + executor.Execute(); } /// @@ -817,7 +948,10 @@ public void Get(BatchPolicy policy, BatchSequenceListener listener, List @@ -861,7 +995,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + executor.Execute(); } /// @@ -887,7 +1024,10 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + executor.Execute(); } /// @@ -933,7 +1073,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, pa { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + executor.Execute(); } /// @@ -960,7 +1103,13 @@ public void 
Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + policy.Txn?.PrepareRead(keys); + + int readAttr = (binNames == null || binNames.Length == 0)? + Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, binNames, null, readAttr, false); + executor.Execute(); } /// @@ -1008,7 +1157,11 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, pa { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + executor.Execute(); } /// @@ -1037,7 +1190,11 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + executor.Execute(); } /// @@ -1081,7 +1238,10 @@ public void GetHeader(BatchPolicy policy, RecordArrayListener listener, Key[] ke { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + executor.Execute(); } /// @@ -1107,7 +1267,10 @@ public void GetHeader(BatchPolicy policy, RecordSequenceListener listener, Key[] { policy = batchPolicyDefault; } - new 
AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + policy.Txn?.PrepareRead(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + executor.Execute(); } //------------------------------------------------------- @@ -1155,9 +1318,21 @@ public Task Operate(WritePolicy policy, CancellationToken token, Key key /// if queue is full public void Operate(WritePolicy policy, RecordListener listener, Key key, params Operation[] ops) { - OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, ops); - AsyncOperate async = new AsyncOperate(cluster, listener, key, args); - async.Execute(); + OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, ops); + policy = args.writePolicy; + + if (args.hasWrite) + { + AsyncOperateWrite async = new(cluster, listener, key, args); + AsyncTxnMonitor.Execute(cluster, policy, async); + } + else + { + policy.Txn?.PrepareRead(key.ns); + + AsyncOperateRead async = new(cluster, listener, key, args); + async.Execute(); + } } //------------------------------------------------------- @@ -1211,7 +1386,8 @@ public void Operate(BatchPolicy policy, BatchOperateListListener listener, List< { policy = batchParentPolicyWriteDefault; } - new AsyncBatchOperateListExecutor(cluster, policy, listener, records); + AsyncBatchOperateListExecutor executor = new(cluster, policy, listener, records); + AsyncTxnMonitor.ExecuteBatch(policy, executor, records); } /// @@ -1245,7 +1421,8 @@ public void Operate(BatchPolicy policy, BatchRecordSequenceListener listener, Li { policy = batchParentPolicyWriteDefault; } - new AsyncBatchOperateSequenceExecutor(cluster, policy, listener, records); + AsyncBatchOperateSequenceExecutor executor = new(cluster, policy, listener, records); + AsyncTxnMonitor.ExecuteBatch(policy, executor, 
records); } /// @@ -1309,7 +1486,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Batch } BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, ops, attr); + AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -1352,7 +1530,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Batch } BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, ops, attr); + AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- @@ -1454,7 +1633,7 @@ public Task Execute(WritePolicy policy, CancellationToken token, Key key /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails public void Execute(WritePolicy policy, ExecuteListener listener, Key key, string packageName, string functionName, params Value[] functionArgs) { if (policy == null) @@ -1462,7 +1641,7 @@ public void Execute(WritePolicy policy, ExecuteListener listener, Key key, strin policy = writePolicyDefault; } AsyncExecute command = new AsyncExecute(cluster, policy, listener, key, packageName, functionName, functionArgs); - command.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, command); } /// @@ -1528,7 +1707,8 @@ public void Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, BatchReco BatchAttr attr = new BatchAttr(); attr.SetUDF(udfPolicy); - new AsyncBatchUDFArrayExecutor(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + 
AsyncBatchUDFArrayExecutor executor = new(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -1575,7 +1755,8 @@ public void Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, BatchReco BatchAttr attr = new BatchAttr(); attr.SetUDF(udfPolicy); - new AsyncBatchUDFSequenceExecutor(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncBatchUDFSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- diff --git a/AerospikeClient/Async/AsyncClientPolicy.cs b/AerospikeClient/Async/AsyncClientPolicy.cs index 339ebc4f..0ff626f8 100644 --- a/AerospikeClient/Async/AsyncClientPolicy.cs +++ b/AerospikeClient/Async/AsyncClientPolicy.cs @@ -86,7 +86,7 @@ public sealed class AsyncClientPolicy : ClientPolicy public int asyncMinConnsPerNode; /// - /// Maximum number of asynchronous connections allowed per server node. Transactions will go + /// Maximum number of asynchronous connections allowed per server node. Commands will go /// through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum /// number of connections would be exceeded. /// diff --git a/AerospikeClient/Async/AsyncCluster.cs b/AerospikeClient/Async/AsyncCluster.cs index 213a2202..43190a16 100644 --- a/AerospikeClient/Async/AsyncCluster.cs +++ b/AerospikeClient/Async/AsyncCluster.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. 
@@ -71,7 +71,7 @@ public AsyncCluster(AsyncClientPolicy policy, Host[] hosts) throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Unsupported MaxCommandAction value: " + policy.asyncMaxCommandAction.ToString()); } - InitTendThread(policy.failIfNotConnected); + StartTendThread(policy); } protected internal override Node CreateNode(NodeValidator nv, bool createMinConn) diff --git a/AerospikeClient/Async/AsyncCommand.cs b/AerospikeClient/Async/AsyncCommand.cs index 4a295901..3221ac14 100644 --- a/AerospikeClient/Async/AsyncCommand.cs +++ b/AerospikeClient/Async/AsyncCommand.cs @@ -180,14 +180,14 @@ private void ExecuteCore() { if (totalTimeout > 0) { - // Timeout already added in Execute(). Verify state. + // Timeout already added in Execute(). Verify State. if (state != IN_PROGRESS) { // Total timeout might have occurred if command was in the delay queue. // Socket timeout should not be possible for commands in the delay queue. if (state != FAIL_TOTAL_TIMEOUT) { - Log.Error(cluster.context, "Unexpected state at async command start: " + state); + Log.Error(cluster.context, "Unexpected State at async command start: " + state); } // User has already been notified of the total timeout. Release buffer and // return for all error states. @@ -431,7 +431,7 @@ public void ReceiveComplete() { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.SignalLogin(); // This is a rare event because the client tracks session @@ -724,7 +724,7 @@ public bool CheckTimeout() node?.AddTimeout(); // Notify user immediately in this timeout thread. - // Transaction thread will cleanup eventArgs. + // Command thread will cleanup eventArgs. NotifyFailure(new AerospikeException.Timeout(policy, true)); } return false; // Do not put back on timeout queue. @@ -746,7 +746,7 @@ public bool CheckTimeout() // Socket timeout has occurred. 
if (Interlocked.CompareExchange(ref state, FAIL_SOCKET_TIMEOUT, IN_PROGRESS) == IN_PROGRESS) { - // User will be notified in transaction thread and this timeout thread. + // User will be notified in command thread and this timeout thread. // Close connection. This will result in a socket error in the async callback thread // and a possible retry. if (node != null && conn != null) @@ -775,14 +775,14 @@ protected internal void Finish() } else if (status == FAIL_TOTAL_TIMEOUT) { - // Timeout thread closed connection, but transaction still completed. + // Timeout thread closed connection, but command still completed. // User has already been notified with timeout. Release buffer and return. ReleaseBuffer(); return; } else if (status == FAIL_SOCKET_TIMEOUT) { - // Timeout thread closed connection, but transaction still completed. + // Timeout thread closed connection, but command still completed. // User has not been notified of the timeout. Release buffer and let // OnSuccess() be called. ReleaseBuffer(); @@ -908,6 +908,12 @@ private void NotifyFailure(AerospikeException ae) ae.Policy = policy; ae.Iteration = iteration; ae.SetInDoubt(IsWrite(), commandSentCounter); + + if (ae.InDoubt) + { + OnInDoubt(); + } + OnFailure(ae); } catch (Exception e) @@ -943,6 +949,12 @@ internal void ReleaseBuffer() } } + // Do nothing by default. Write commands will override this method. + protected internal virtual void OnInDoubt() + { + + } + protected internal virtual bool RetryBatch() { return false; diff --git a/AerospikeClient/Async/AsyncConnectionTls.cs b/AerospikeClient/Async/AsyncConnectionTls.cs index 1fa32aa1..80819d41 100644 --- a/AerospikeClient/Async/AsyncConnectionTls.cs +++ b/AerospikeClient/Async/AsyncConnectionTls.cs @@ -202,7 +202,7 @@ private void ReceiveEvent(IAsyncResult result) { // Do not call command completed methods because that can result in a new // Send()/Receive() call before the current sync BeginRead() has completed. 
- // Instead, set the state which will be handled after BeginRead() is done. + // Instead, set the State which will be handled after BeginRead() is done. try { int received = sslStream.EndRead(result); diff --git a/AerospikeClient/Async/AsyncConnector.cs b/AerospikeClient/Async/AsyncConnector.cs index 7bf07a6b..634dc0be 100644 --- a/AerospikeClient/Async/AsyncConnector.cs +++ b/AerospikeClient/Async/AsyncConnector.cs @@ -225,7 +225,7 @@ public void ReceiveComplete() { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.SignalLogin(); // This is a rare event because the client tracks session diff --git a/AerospikeClient/Async/AsyncDelete.cs b/AerospikeClient/Async/AsyncDelete.cs index cd63221b..cafbd036 100644 --- a/AerospikeClient/Async/AsyncDelete.cs +++ b/AerospikeClient/Async/AsyncDelete.cs @@ -1,123 +1,93 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class AsyncDelete : AsyncSingleCommand - { - private readonly WritePolicy writePolicy; - private readonly DeleteListener listener; - private readonly Key key; - private readonly Partition partition; - private bool existed; - - public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, DeleteListener listener) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.listener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); - } - - public AsyncDelete(AsyncDelete other) - : base(other) - { - this.writePolicy = other.writePolicy; - this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncDelete(this); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetDelete(writePolicy, key); - } - - protected internal override void ParseResult() - { - int resultCode = dataBuffer[dataOffset + 5]; - - if (resultCode == 0) - { - existed = true; - return; - } - - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - existed = false; - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - existed = true; - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(key, existed); - } - } - - protected 
internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncDelete : AsyncWriteBase + { + private readonly DeleteListener listener; + private bool existed; + + public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, DeleteListener listener) + : base(cluster, writePolicy, key) + { + this.listener = listener; + } + + public AsyncDelete(AsyncDelete other) + : base(other) + { + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncDelete(this); + } + + protected internal override void WriteBuffer() + { + SetDelete(writePolicy, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + existed = true; + return true; + } + + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + existed = false; + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + existed = true; + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override 
void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, existed); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncExecute.cs b/AerospikeClient/Async/AsyncExecute.cs index d3cea1f3..01503c9d 100644 --- a/AerospikeClient/Async/AsyncExecute.cs +++ b/AerospikeClient/Async/AsyncExecute.cs @@ -15,15 +15,17 @@ * the License. */ +using System; + namespace Aerospike.Client { - public sealed class AsyncExecute : AsyncRead + public sealed class AsyncExecute : AsyncWriteBase { - private readonly WritePolicy writePolicy; private readonly ExecuteListener executeListener; private readonly string packageName; private readonly string functionName; private readonly Value[] args; + private Record record; public AsyncExecute ( @@ -36,7 +38,6 @@ public AsyncExecute Value[] args ) : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.executeListener = listener; this.packageName = packageName; this.functionName = functionName; @@ -46,7 +47,6 @@ Value[] args public AsyncExecute(AsyncExecute other) : base(other) { - this.writePolicy = other.writePolicy; this.executeListener = other.executeListener; this.packageName = other.packageName; this.functionName = other.functionName; @@ -58,35 +58,66 @@ protected internal override AsyncCommand CloneCommand() return new AsyncExecute(this); } - protected internal override bool IsWrite() + protected internal override void WriteBuffer() { - return true; + SetUdf(writePolicy, Key, packageName, functionName, args); } - protected internal override Node GetNode(Cluster cluster) + protected internal override bool ParseResult() { - return partition.GetNodeWrite(cluster); - } + ParseHeader(); + ParseFields(policy.Txn, Key, true); - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } + if (resultCode == ResultCode.OK) + { + record = 
policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + return true; + } - protected internal override void WriteBuffer() - { - SetUdf(writePolicy, key, packageName, functionName, args); - } + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + HandleUdfError(resultCode); + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } - protected internal override void HandleNotFound(int resultCode) - { throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) + private void HandleUdfError(int resultCode) { - partition.PrepareRetryWrite(timeout); - return true; + string ret = (string)record.bins["FAILURE"]; + + if (ret == null) + { + throw new AerospikeException(resultCode); + } + + String message; + int code; + + try + { + string[] list = ret.Split(":"); + Int32.TryParse(list[2].Trim(), out code); + message = list[0] + ':' + list[1] + ' ' + list[3]; + } + catch (Exception e) + { + // Use generic exception if parse error occurs. 
+ throw new AerospikeException(resultCode, ret); + } + + throw new AerospikeException(code, message); } protected internal override void OnSuccess() @@ -94,7 +125,7 @@ protected internal override void OnSuccess() if (executeListener != null) { object obj = ParseEndResult(); - executeListener.OnSuccess(key, obj); + executeListener.OnSuccess(Key, obj); } } diff --git a/AerospikeClient/Async/AsyncExists.cs b/AerospikeClient/Async/AsyncExists.cs index 4217acb5..4f1eea6a 100644 --- a/AerospikeClient/Async/AsyncExists.cs +++ b/AerospikeClient/Async/AsyncExists.cs @@ -17,28 +17,21 @@ namespace Aerospike.Client { - public sealed class AsyncExists : AsyncSingleCommand + public sealed class AsyncExists : AsyncReadBase { private readonly ExistsListener listener; - private readonly Key key; - private readonly Partition partition; private bool exists; public AsyncExists(AsyncCluster cluster, Policy policy, Key key, ExistsListener listener) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); } public AsyncExists(AsyncExists other) : base(other) { this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -46,35 +39,26 @@ protected internal override AsyncCommand CloneCommand() return new AsyncExists(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetExists(policy, key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { exists = 
true; - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { exists = false; - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -84,18 +68,12 @@ protected internal override void ParseResult() throw new AerospikeException(resultCode); } exists = true; - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncMultiCommand.cs b/AerospikeClient/Async/AsyncMultiCommand.cs index a520d9f5..3af3225b 100644 --- a/AerospikeClient/Async/AsyncMultiCommand.cs +++ b/AerospikeClient/Async/AsyncMultiCommand.cs @@ -1,142 +1,137 @@ -/* - * Copyright 2012-2022 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System.Collections.Generic; - -namespace Aerospike.Client -{ - public abstract class AsyncMultiCommand : AsyncCommand - { - protected internal readonly AsyncNode serverNode; - protected internal int info3; - protected internal int resultCode; - protected internal int generation; - protected internal int expiration; - protected internal int batchIndex; - protected internal int fieldCount; - protected internal int opCount; - protected internal readonly bool isOperation; - protected internal volatile bool valid = true; - - /// - /// Batch constructor. - /// - public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, bool isOperation) - : base(cluster, policy) - { - this.serverNode = node; - this.isOperation = isOperation; - } - - /// - /// Scan/Query constructor. - /// - public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, int socketTimeout, int totalTimeout) - : base(cluster, policy, socketTimeout, totalTimeout) - { - this.serverNode = node; - this.isOperation = false; - } - - public AsyncMultiCommand(AsyncMultiCommand other) : base(other) - { - this.serverNode = other.serverNode; - this.isOperation = other.isOperation; - } - - protected internal sealed override void ParseCommand() - { - if (!valid) - { - throw new AerospikeException.QueryTerminated(); - } - - if (ParseGroup()) - { - Finish(); - return; - } - - // Prepare for next group. - ReceiveNext(); - } - - protected internal override Node GetNode(Cluster cluster) - { - return serverNode; - } - - protected internal override bool PrepareRetry(bool timeout) - { - return true; - } - - private bool ParseGroup() - { - // Parse each message response and add it to the result array - while (dataOffset < dataLength) - { - dataOffset += 3; - info3 = dataBuffer[dataOffset]; - dataOffset += 2; - resultCode = dataBuffer[dataOffset]; - - // If this is the end marker of the response, do not proceed further. 
- if ((info3 & Command.INFO3_LAST) != 0) - { - if (resultCode != 0) - { - // The server returned a fatal error. - throw new AerospikeException(resultCode); - } - return true; - } - - dataOffset++; - generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - batchIndex = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - - ParseRow(); - } - return false; - } - - protected internal abstract void ParseRow(); - - protected internal Record ParseRecord() - { - if (opCount <= 0) - { - return new Record(null, generation, expiration); - } - - return policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - } - - protected internal void Stop() - { - valid = false; - } - } -} +/* + * Copyright 2012-2022 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
namespace Aerospike.Client
{
	/// <summary>
	/// Base class for async commands whose response contains multiple records
	/// (batch, scan and query). Parses the multi-record wire protocol one
	/// group at a time and routes all requests to a fixed server node.
	/// </summary>
	public abstract class AsyncMultiCommand : AsyncCommand
	{
		protected internal readonly AsyncNode serverNode;
		protected internal int info3;
		protected internal int batchIndex;
		protected internal readonly bool isOperation;
		// Cleared by Stop() to abort parsing; volatile because Stop() may be
		// invoked from a thread other than the one driving ParseCommand().
		protected internal volatile bool valid = true;

		/// <summary>
		/// Batch constructor.
		/// </summary>
		public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, bool isOperation)
			: base(cluster, policy)
		{
			this.serverNode = node;
			this.isOperation = isOperation;
		}

		/// <summary>
		/// Scan/Query constructor.
		/// </summary>
		public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, int socketTimeout, int totalTimeout)
			: base(cluster, policy, socketTimeout, totalTimeout)
		{
			this.serverNode = node;
			this.isOperation = false;
		}

		/// <summary>
		/// Clone constructor used for retries.
		/// </summary>
		public AsyncMultiCommand(AsyncMultiCommand other)
			: base(other)
		{
			this.serverNode = other.serverNode;
			this.isOperation = other.isOperation;
		}

		protected internal sealed override void ParseCommand()
		{
			if (!valid)
			{
				throw new AerospikeException.QueryTerminated();
			}

			if (ParseGroup())
			{
				// End marker seen; the command is complete.
				Finish();
				return;
			}

			// Prepare for next group.
			ReceiveNext();
		}

		protected internal override Node GetNode(Cluster cluster)
		{
			// Multi-record commands are pinned to a single node.
			return serverNode;
		}

		protected internal override bool PrepareRetry(bool timeout)
		{
			return true;
		}

		// Parse each message response in the current group and hand each row
		// to the subclass. Returns true when the server's end marker is seen.
		private bool ParseGroup()
		{
			while (dataOffset < dataLength)
			{
				dataOffset += 3;
				info3 = dataBuffer[dataOffset];
				dataOffset += 2;
				resultCode = dataBuffer[dataOffset];

				// If this is the end marker of the response, do not proceed further.
				if ((info3 & Command.INFO3_LAST) != 0)
				{
					if (resultCode != 0)
					{
						// The server returned a fatal error.
						throw new AerospikeException(resultCode);
					}
					return true;
				}

				dataOffset++;
				generation = ByteUtil.BytesToInt(dataBuffer, dataOffset);
				dataOffset += 4;
				expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset);
				dataOffset += 4;
				batchIndex = ByteUtil.BytesToInt(dataBuffer, dataOffset);
				dataOffset += 4;
				fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
				dataOffset += 2;
				opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
				dataOffset += 2;

				ParseRow();
			}
			return false;
		}

		protected internal abstract void ParseRow();

		protected internal Record ParseRecord()
		{
			if (opCount <= 0)
			{
				// Bin data was not returned.
				return new Record(null, generation, expiration);
			}

			return policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation);
		}

		/// <summary>
		/// Abort parsing of any subsequent groups.
		/// </summary>
		protected internal void Stop()
		{
			valid = false;
		}
	}
}
namespace Aerospike.Client
{
	/// <summary>
	/// Async operate command for operation sequences that only read.
	/// Reuses AsyncRead's parsing and listener handling; only the request
	/// serialization (SetOperate vs SetRead) differs.
	/// </summary>
	public sealed class AsyncOperateRead : AsyncRead
	{
		private readonly OperateArgs args;

		public AsyncOperateRead(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args)
			: base(cluster, args.writePolicy, listener, key, true)
		{
			this.args = args;
		}

		public AsyncOperateRead(AsyncOperateRead other)
			: base(other)
		{
			this.args = other.args;
		}

		protected internal override AsyncCommand CloneCommand()
		{
			// BUGFIX: without this override the inherited AsyncRead.CloneCommand()
			// returns a plain AsyncRead, so a retried command would serialize a
			// SetRead request and silently drop the operate args.
			return new AsyncOperateRead(this);
		}

		protected internal override void WriteBuffer()
		{
			SetOperate(args.writePolicy, key, args);
		}
	}
}
namespace Aerospike.Client
{
	/// <summary>
	/// Async operate command for operation sequences that contain at least one
	/// write. Routed as a write (via AsyncWriteBase) and parses the returned
	/// record for the listener.
	/// </summary>
	public sealed class AsyncOperateWrite : AsyncWriteBase
	{
		private readonly RecordListener listener;
		private readonly OperateArgs args;
		private Record record;

		public AsyncOperateWrite(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args)
			: base(cluster, args.writePolicy, key)
		{
			this.listener = listener;
			this.args = args;
		}

		public AsyncOperateWrite(AsyncOperateWrite other)
			: base(other)
		{
			// BUGFIX: listener was not copied here, so a retried (cloned)
			// command completed without ever notifying the caller.
			this.listener = other.listener;
			this.args = other.args;
		}

		protected internal override AsyncCommand CloneCommand()
		{
			return new AsyncOperateWrite(this);
		}

		protected internal override void WriteBuffer()
		{
			SetOperate(args.writePolicy, Key, args);
		}

		protected internal override bool ParseResult()
		{
			ParseHeader();
			ParseFields(policy.Txn, Key, true);

			if (resultCode == ResultCode.OK)
			{
				record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, true);
				return true;
			}

			if (resultCode == ResultCode.FILTERED_OUT)
			{
				if (policy.failOnFilteredOut)
				{
					throw new AerospikeException(resultCode);
				}
				// Filtered out is not an error unless the policy says so;
				// record stays null.
				return true;
			}

			throw new AerospikeException(resultCode);
		}

		protected internal override void OnSuccess()
		{
			if (listener != null)
			{
				listener.OnSuccess(Key, record);
			}
		}

		protected internal override void OnFailure(AerospikeException e)
		{
			if (listener != null)
			{
				listener.OnFailure(e);
			}
		}
	}
}
*/ -using System; -using System.Collections.Generic; namespace Aerospike.Client { @@ -41,7 +39,7 @@ PartitionTracker tracker this.statement = statement; this.tracker = tracker; - cluster.AddTran(); + cluster.AddCommandCount(); tracker.SleepBetweenRetries = 0; taskId = statement.PrepareTaskId(); QueryPartitions(); diff --git a/AerospikeClient/Async/AsyncRead.cs b/AerospikeClient/Async/AsyncRead.cs index 5cb5ebb8..b83b3a22 100644 --- a/AerospikeClient/Async/AsyncRead.cs +++ b/AerospikeClient/Async/AsyncRead.cs @@ -17,59 +17,37 @@ namespace Aerospike.Client { - public class AsyncRead : AsyncSingleCommand + public class AsyncRead : AsyncReadBase { private readonly RecordListener listener; - protected internal readonly Key key; private readonly string[] binNames; private readonly bool isOperation; - protected readonly Partition partition; protected Record record; // Read constructor. public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, string[] binNames) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; this.binNames = binNames; this.isOperation = false; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); - } - - // UDF constructor. - public AsyncRead(AsyncCluster cluster, WritePolicy policy, Key key) - : base(cluster, policy) - { - this.listener = null; - this.key = key; - this.binNames = null; - this.isOperation = false; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); } // Operate constructor. 
- public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, Partition partition, bool isOperation) - : base(cluster, policy) + public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, bool isOperation) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; this.binNames = null; this.isOperation = isOperation; - this.partition = partition; - cluster.AddTran(); } public AsyncRead(AsyncRead other) : base(other) { this.listener = other.listener; - this.key = other.key; this.binNames = other.binNames; this.isOperation = other.isOperation; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -77,47 +55,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncRead(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetRead(policy, key, binNames); } - protected internal sealed override void ParseResult() + protected internal sealed override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); - int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18); - int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20); - dataOffset += Command.MSG_REMAINING_HEADER_SIZE; + ParseHeader(); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - if (opCount == 0) - { - // Bin data was not returned. 
- record = new Record(null, generation, expiration); - return; - } - SkipKey(fieldCount); record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - HandleNotFound(resultCode); - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -126,59 +82,12 @@ record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, ge { throw new AerospikeException(resultCode); } - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - HandleUdfError(resultCode); - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - - protected internal virtual void HandleNotFound(int resultCode) - { - // Do nothing in default case. Record will be null. - } - - private void HandleUdfError(int resultCode) - { - object obj; - - if (!record.bins.TryGetValue("FAILURE", out obj)) - { - throw new AerospikeException(resultCode); - } - - string ret = (string)obj; - string message; - int code; - - try - { - string[] list = ret.Split(':'); - code = Convert.ToInt32(list[2].Trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception) - { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncReadBase.cs b/AerospikeClient/Async/AsyncReadBase.cs new file mode 100644 index 00000000..4ac55d47 --- /dev/null +++ b/AerospikeClient/Async/AsyncReadBase.cs @@ -0,0 +1,65 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
namespace Aerospike.Client
{
	/// <summary>
	/// Base class for async single-record read commands. Centralizes the key,
	/// read-partition routing, read latency accounting and read retry
	/// preparation shared by AsyncRead, AsyncExists and AsyncReadHeader.
	/// </summary>
	public abstract class AsyncReadBase : AsyncSingleCommand
	{
		protected internal readonly Key key;
		protected readonly Partition partition;

		public AsyncReadBase(AsyncCluster cluster, Policy policy, Key key)
			: base(cluster, policy)
		{
			this.key = key;
			this.partition = Partition.Read(cluster, policy, key);
			cluster.AddCommandCount();
		}

		/// <summary>
		/// Clone constructor used for retries.
		/// </summary>
		public AsyncReadBase(AsyncReadBase other)
			: base(other)
		{
			this.key = other.key;
			this.partition = other.partition;
		}

		protected internal override bool IsWrite()
		{
			return false;
		}

		protected internal override Node GetNode(Cluster cluster)
		{
			return partition.GetNodeRead(cluster);
		}

		protected override Latency.LatencyType GetLatencyType()
		{
			return Latency.LatencyType.READ;
		}

		protected internal override bool PrepareRetry(bool timeout)
		{
			partition.PrepareRetryRead(timeout);
			return true;
		}

		protected internal abstract override void WriteBuffer();

		protected internal abstract override bool ParseResult();
	}
}
public sealed class AsyncReadHeader : AsyncSingleCommand + public sealed class AsyncReadHeader : AsyncReadBase { private readonly RecordListener listener; - private readonly Key key; - private readonly Partition partition; private Record record; public AsyncReadHeader(AsyncCluster cluster, Policy policy, RecordListener listener, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); + cluster.AddCommandCount(); } public AsyncReadHeader(AsyncReadHeader other) : base(other) { this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -46,37 +40,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncReadHeader(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetReadHeader(policy, key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); - record = new Record(null, generation, expiration); - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -85,18 +67,12 @@ protected internal override void ParseResult() { throw new AerospikeException(resultCode); } - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool 
PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs index fd43e5d9..704e25d8 100644 --- a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs @@ -14,8 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System; -using System.Collections.Generic; namespace Aerospike.Client { @@ -46,7 +44,7 @@ PartitionTracker tracker this.binNames = binNames; this.tracker = tracker; - cluster.AddTran(); + cluster.AddCommandCount(); tracker.SleepBetweenRetries = 0; ScanPartitions(); } diff --git a/AerospikeClient/Async/AsyncSingleCommand.cs b/AerospikeClient/Async/AsyncSingleCommand.cs index 6ce6abf2..696b21eb 100644 --- a/AerospikeClient/Async/AsyncSingleCommand.cs +++ b/AerospikeClient/Async/AsyncSingleCommand.cs @@ -1,40 +1,51 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public abstract class AsyncSingleCommand : AsyncCommand - { - public AsyncSingleCommand(AsyncCluster cluster, Policy policy) - : base(cluster, policy) - { - } - - public AsyncSingleCommand(AsyncSingleCommand other) - : base(other) - { - } - - protected internal sealed override void ParseCommand() - { - ParseResult(); - Finish(); - } - - protected internal abstract void ParseResult(); - } -} +/* + * Copyright 2012-2023 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
namespace Aerospike.Client
{
	/// <summary>
	/// Base class for async commands that expect a single-record response.
	/// The whole response arrives in one message, so ParseCommand parses it
	/// and finishes immediately.
	/// </summary>
	public abstract class AsyncSingleCommand : AsyncCommand
	{
		public AsyncSingleCommand(AsyncCluster cluster, Policy policy)
			: base(cluster, policy)
		{
		}

		/// <summary>
		/// Clone constructor used for retries.
		/// </summary>
		public AsyncSingleCommand(AsyncSingleCommand other)
			: base(other)
		{
		}

		protected internal sealed override void ParseCommand()
		{
			// Single-record responses need only one parse pass.
			ParseResult();
			Finish();
		}

		// Decode the fixed message header fields and advance dataOffset past
		// the remaining header bytes.
		protected void ParseHeader()
		{
			resultCode = dataBuffer[dataOffset + 5];
			generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6);
			expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10);
			fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18);
			opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20);
			dataOffset += Command.MSG_REMAINING_HEADER_SIZE;
		}

		protected internal abstract bool ParseResult();
	}
}
- */ - -namespace Aerospike.Client -{ - public sealed class AsyncTouch : AsyncSingleCommand - { - private readonly WritePolicy writePolicy; - private readonly WriteListener listener; - private readonly ExistsListener existsListener; - private readonly Key key; - private readonly Partition partition; - private bool touched; - - public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, WriteListener listener, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.listener = listener; - this.existsListener = null; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); - } - - public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, ExistsListener listener, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.listener = null; - this.existsListener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); - } - - public AsyncTouch(AsyncTouch other) - : base(other) - { - this.writePolicy = other.writePolicy; - this.listener = other.listener; - this.existsListener = other.existsListener; - this.key = other.key; - this.partition = other.partition; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncTouch(this); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetTouch(writePolicy, key); - } - - protected internal override void ParseResult() - { - int resultCode = dataBuffer[dataOffset + 5]; - - if (resultCode == 0) - { - touched = true; - return; - } - - touched = false; - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - if (existsListener == null) - { - throw new 
AerospikeException(resultCode); - } - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(key); - } - else if (existsListener != null) - { - existsListener.OnSuccess(key, touched); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - else if (existsListener != null) - { - existsListener.OnFailure(e); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncTouch : AsyncWriteBase + { + private readonly WriteListener listener; + private readonly ExistsListener existsListener; + private bool touched; + + public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, WriteListener listener, Key key) + : base(cluster, writePolicy, key) + { + this.listener = listener; + this.existsListener = null; + } + + public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, ExistsListener listener, Key key) + : base(cluster, writePolicy, key) + { + this.listener = null; + this.existsListener = listener; + } + + public AsyncTouch(AsyncTouch other) + : base(other) + { + this.listener = other.listener; + this.existsListener = other.existsListener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTouch(this); + } + + protected internal override void WriteBuffer() + { + SetTouch(writePolicy, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + touched = true; + return true; + } + + touched = false; + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + if (existsListener == null) + { + throw new AerospikeException(resultCode); + } + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + else if (existsListener != null) + { + existsListener.OnSuccess(Key, touched); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + else if (existsListener != null) + { + existsListener.OnFailure(e); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncOperate.cs 
b/AerospikeClient/Async/AsyncTxnAddKeys.cs similarity index 50% rename from AerospikeClient/Async/AsyncOperate.cs rename to AerospikeClient/Async/AsyncTxnAddKeys.cs index 736c7ec7..1f61021e 100644 --- a/AerospikeClient/Async/AsyncOperate.cs +++ b/AerospikeClient/Async/AsyncTxnAddKeys.cs @@ -17,68 +17,78 @@ namespace Aerospike.Client { - public sealed class AsyncOperate : AsyncRead + public sealed class AsyncTxnAddKeys : AsyncWriteBase { + private readonly RecordListener listener; private readonly OperateArgs args; + private readonly Txn txn; - public AsyncOperate(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args) - : base(cluster, args.writePolicy, listener, key, args.GetPartition(cluster, key), true) + public AsyncTxnAddKeys + ( + AsyncCluster cluster, + RecordListener listener, + Key key, + OperateArgs args, + Txn txn + ) : base(cluster, args.writePolicy, key) { + this.listener = listener; this.args = args; + this.txn = txn; } - public AsyncOperate(AsyncOperate other) + public AsyncTxnAddKeys(AsyncTxnAddKeys other) : base(other) { + this.listener = other.listener; this.args = other.args; + this.txn = other.txn; } protected internal override AsyncCommand CloneCommand() { - return new AsyncOperate(this); + return new AsyncTxnAddKeys(this); } - protected internal override bool IsWrite() + protected internal override void WriteBuffer() { - return args.hasWrite; + SetTxnAddKeys(args.writePolicy, Key, args); } - protected internal override Node GetNode(Cluster cluster) + protected internal override bool ParseResult() { - return args.hasWrite ? partition.GetNodeWrite(cluster) : partition.GetNodeRead(cluster); - } + ParseHeader(); + ParseTxnDeadline(txn); - protected override Latency.LatencyType GetLatencyType() - { - return args.hasWrite ? 
Latency.LatencyType.WRITE : Latency.LatencyType.READ; + if (resultCode == ResultCode.OK) + { + return true; + } + + throw new AerospikeException(resultCode); } - protected internal override void WriteBuffer() + protected internal override bool PrepareRetry(bool timeout) { - SetOperate(args.writePolicy, key, args); + partition.PrepareRetryWrite(timeout); + return true; } - protected internal override void HandleNotFound(int resultCode) + protected internal override void OnSuccess() { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. - if (args.hasWrite) + if (listener != null) { - throw new AerospikeException(resultCode); + listener.OnSuccess(Key, null); } } - protected internal override bool PrepareRetry(bool timeout) + protected internal override void OnFailure(AerospikeException e) { - if (args.hasWrite) + if (listener != null) { - partition.PrepareRetryWrite(timeout); + listener.OnFailure(e); } - else - { - partition.PrepareRetryRead(timeout); - } - return true; } } } + diff --git a/AerospikeClient/Async/AsyncTxnClose.cs b/AerospikeClient/Async/AsyncTxnClose.cs new file mode 100644 index 00000000..03c2376c --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnClose.cs @@ -0,0 +1,89 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnClose : AsyncWriteBase + { + private readonly Txn txn; + private readonly DeleteListener listener; + + public AsyncTxnClose + ( + AsyncCluster cluster, + Txn txn, + DeleteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.txn = txn; + this.listener = listener; + } + + public AsyncTxnClose(AsyncTxnClose other) + : base(other) + { + this.txn = other.txn; + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnClose(this); + } + + protected internal override void WriteBuffer() + { + SetTxnClose(txn, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, true); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs new file mode 100644 index 00000000..33807800 --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -0,0 +1,87 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnMarkRollForward : AsyncWriteBase + { + private readonly WriteListener listener; + + public AsyncTxnMarkRollForward + ( + AsyncCluster cluster, + WriteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.listener = listener; + } + + public AsyncTxnMarkRollForward(AsyncTxnMarkRollForward other) + : base(other) + { + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnMarkRollForward(this); + } + + protected internal override void WriteBuffer() + { + SetTxnMarkRollForward(Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + // MRT_COMMITTED is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. 
+ if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMonitor.cs b/AerospikeClient/Async/AsyncTxnMonitor.cs new file mode 100644 index 00000000..e5901cac --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnMonitor.cs @@ -0,0 +1,202 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +namespace Aerospike.Client +{ + public abstract class AsyncTxnMonitor + { + public static void Execute(AsyncCluster cluster, WritePolicy policy, AsyncWriteBase command) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + command.Execute(); + return; + } + + Txn txn = policy.Txn; + Key cmdKey = command.Key; + + if (txn.Writes.Contains(cmdKey)) + { + // MRT monitor already contains this key. Run original command. + command.Execute(); + return; + } + + // Add key to MRT monitor and then run original command. 
+ Operation[] ops = TxnMonitor.GetTxnOps(txn, cmdKey); + SingleTxnMonitor stm = new(cluster, command); + stm.Execute(cluster, policy, ops); + } + + public static void ExecuteBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + Key[] keys + ) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + executor.Execute(executor.commands); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, keys); + BatchTxnMonitor ate = new(executor); + ate.Execute(executor.cluster, policy, ops); + } + + public static void ExecuteBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + List records + ) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + executor.Execute(); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, records); + + if (ops == null) + { + // Readonly batch does not need to add key digests. Run original command. 
+ executor.Execute(); + return; + } + + BatchTxnMonitor ate = new(executor); + ate.Execute(executor.cluster, policy, ops); + } + + public sealed class SingleTxnMonitor : AsyncTxnMonitor + { + public SingleTxnMonitor(AsyncCluster cluster, AsyncWriteBase command) + : base(command, cluster) + { + } + + public override void RunCommand() + { + command.Execute(); + } + + public override void OnFailure(AerospikeException ae) + { + command.OnFailure(ae); + } + } + + public sealed class BatchTxnMonitor : AsyncTxnMonitor + { + private readonly AsyncBatchExecutor executor; + + public BatchTxnMonitor(AsyncBatchExecutor executor) + : base(null, null) + { + this.executor = executor; + } + + public override void RunCommand() + { + executor.Execute(); + } + + public override void OnFailure(AerospikeException ae) + { + executor.OnFailure(ae); + } + } + + readonly AsyncCommand command; + readonly AsyncCluster cluster; + + private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster) + { + this.command = command; + this.cluster = cluster; + } + + void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) + { + Txn txn = policy.Txn; + Key txnKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); + WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy); + + ExecuteRecordListener txnListener = new(this); + + // Add write key(s) to MRT monitor. 
+ OperateArgs args = new(wp, null, null, ops); + AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args, txn); + txnCommand.Execute(); + } + + private void NotifyFailure(AerospikeException ae) + { + try + { + OnFailure(ae); + } + catch (Exception t) + { + Log.Error("notifyCommandFailure onFailure() failed: " + t.StackTrace); + } + } + + public abstract void OnFailure(AerospikeException ae); + public abstract void RunCommand(); + + private sealed class ExecuteRecordListener : RecordListener + { + private readonly AsyncTxnMonitor monitor; + + public ExecuteRecordListener(AsyncTxnMonitor monitor) + { + this.monitor = monitor; + } + + public void OnSuccess(Key key, Record record) + { + try + { + // Run original command. + monitor.RunCommand(); + } + catch (AerospikeException ae) + { + monitor.NotifyFailure(ae); + } + catch (Exception t) + { + monitor.NotifyFailure(new AerospikeException(t)); + } + } + + public void OnFailure(AerospikeException ae) + { + monitor.NotifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae)); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs new file mode 100644 index 00000000..f1474e00 --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -0,0 +1,579 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using static Aerospike.Client.AbortStatus; +using static Aerospike.Client.CommitError; +using static Aerospike.Client.CommitStatus; + +namespace Aerospike.Client +{ + public sealed class AsyncTxnRoll + { + private readonly AsyncCluster cluster; + private readonly BatchPolicy verifyPolicy; + private readonly BatchPolicy rollPolicy; + private readonly WritePolicy writePolicy; + private readonly Txn txn; + private readonly Key txnKey; + private CommitListener commitListener; + private AbortListener abortListener; + private BatchRecord[] verifyRecords; + private BatchRecord[] rollRecords; + private AerospikeException verifyException; + + public AsyncTxnRoll + ( + AsyncCluster cluster, + BatchPolicy verifyPolicy, + BatchPolicy rollPolicy, + Txn txn + ) + { + this.cluster = cluster; + this.verifyPolicy = verifyPolicy; + this.rollPolicy = rollPolicy; + this.writePolicy = new WritePolicy(rollPolicy); + this.txn = txn; + this.txnKey = TxnMonitor.GetTxnMonitorKey(txn); + } + + public void Verify(CommitListener listener) + { + commitListener = listener; + Verify(new VerifyListener(this)); + } + + public void Commit(CommitListener listener) + { + commitListener = listener; + Commit(); + } + + private void Commit() + { + if (txn.MonitorExists()) + { + MarkRollForward(); + } + else + { + txn.State = Txn.TxnState.COMMITTED; + CloseOnCommit(true); + } + } + + public void Abort(AbortListener listener) + { + abortListener = listener; + txn.State = Txn.TxnState.ABORTED; + + Roll(new RollListener(this), Command.INFO4_MRT_ROLL_BACK); + } + + private void Verify(BatchRecordArrayListener verifyListener) + { + // Validate record versions in a batch. 
+ BatchRecord[] records = null; + Key[] keys = null; + long?[] versions = null; + + bool actionPerformed = txn.Reads.PerformActionOnEachElement(max => + { + if (max == 0) return false; + + records = new BatchRecord[max]; + keys = new Key[max]; + versions = new long?[max]; + return true; + }, + (key, value, count) => + { + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = value; + }); + + if (!actionPerformed) // If no action was performed, there are no elements. Return. + { + verifyListener.OnSuccess(new BatchRecord[0], true); + return; + } + + this.verifyRecords = records; + + AsyncBatchTxnVerifyExecutor executor = new(cluster, verifyPolicy, verifyListener, keys, versions, records); + executor.Execute(); + } + + private void MarkRollForward() + { + // Tell MRT monitor that a roll-forward will commence. + try + { + MarkRollForwardListener writeListener = new(this); + AsyncTxnMarkRollForward command = new(cluster, writeListener, writePolicy, txnKey); + command.Execute(); + } + catch (Exception e) + { + NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e); + } + } + + private void RollForward() + { + try + { + RollForwardListener rollListener = new(this); + Roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD); + } + catch (Exception) + { + NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + } + + private void RollBack() + { + try + { + RollBackListener rollListener = new(this); + Roll(rollListener, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception e) + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e); + } + } + + private void Roll(BatchRecordArrayListener rollListener, int txnAttr) + { + + BatchRecord[] records = null; + Key[] keys = null; + + bool actionPerformed = txn.Writes.PerformActionOnEachElement(max => + { + if (max == 0) return false; + + records = new BatchRecord[max]; + keys = new Key[max]; + return true; + }, + (item, count) => + { + keys[count] = item; + 
records[count] = new BatchRecord(item, true); + }); + + if (!actionPerformed) + { + rollListener.OnSuccess(new BatchRecord[0], true); + return; + } + + BatchAttr attr = new(); + attr.SetTxn(txnAttr); + + AsyncBatchTxnRollExecutor executor = new(cluster, rollPolicy, rollListener, txn, keys, records, attr); + executor.Execute(); + } + + private void CloseOnCommit(bool verified) + { + if (!txn.CloseMonitor()) + { + if (verified) + { + NotifyCommitSuccess(CommitStatusType.OK); + } + else + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null); + } + return; + } + + try + { + AsyncTxnClose command = new(cluster, txn, new CloseOnCommitListener(this, verified), writePolicy, txnKey); + command.Execute(); + } + catch (Exception e) + { + if (verified) + { + NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); + } + else + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e); + } + } + } + + private void CloseOnAbort() + { + if (!txn.CloseMonitor()) + { + // There is no MRT monitor record to remove. 
+ NotifyAbortSuccess(AbortStatusType.OK); + return; + } + + try + { + CloseOnAbortListener deleteListener = new(this); + AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, txnKey); + command.Execute(); + } + catch (Exception) + { + NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + } + + private void NotifyCommitSuccess(CommitStatusType status) + { + txn.Clear(); + + try + { + commitListener.OnSuccess(status); + } + catch (Exception t) + { + Log.Error("CommitListener OnSuccess() failed: " + t.StackTrace); + } + } + + private void NotifyCommitFailure(CommitErrorType error, Exception cause) + { + AerospikeException.Commit aec = CreateCommitException(error, cause); + + if (verifyException != null) + { + if (cause == null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, verifyException); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, new[] { cause, verifyException }); + } + } + else if (cause != null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords); + } + + NotifyCommitFailure(aec); + } + + private void NotifyMarkRollForwardFailure(CommitErrorType error, Exception cause) + { + AerospikeException.Commit aec = CreateCommitException(error, cause); + + if (cause is AerospikeException) + { + AerospikeException ae = (AerospikeException)cause; + + if (ae.Result == ResultCode.MRT_ABORTED) + { + aec.SetInDoubt(false); + txn.InDoubt = false; + txn.State = Txn.TxnState.ABORTED; + } + else if (txn.InDoubt) + { + // The transaction was already InDoubt and just failed again, + // so the new exception should also be InDoubt. + aec.SetInDoubt(true); + } + else if (ae.InDoubt) + { + // The current exception is InDoubt. 
+ aec.SetInDoubt(true); + txn.InDoubt = true; + } + } + else + { + if (txn.InDoubt) + { + aec.SetInDoubt(true); + } + } + + NotifyCommitFailure(aec); + } + + private AerospikeException.Commit CreateCommitException(CommitErrorType error, Exception cause) + { + if (cause != null) + { + AerospikeException.Commit aec = new(error, verifyRecords, rollRecords, cause); + + if (cause is AerospikeException) + { + AerospikeException src = (AerospikeException)cause; + aec.Node = src.Node; + aec.Policy = src.Policy; + aec.Iteration = src.Iteration; + aec.SetInDoubt(src.InDoubt); + } + return aec; + } + else + { + return new AerospikeException.Commit(error, verifyRecords, rollRecords); + } + } + + private void NotifyCommitFailure(AerospikeException.Commit aec) + { + try + { + commitListener.OnFailure(aec); + } + catch (Exception e) + { + Log.Error("CommitListener OnFailure() failed: " + e.StackTrace); + } + } + + private void NotifyAbortSuccess(AbortStatusType status) + { + txn.Clear(); + + try + { + abortListener.OnSuccess(status); + } + catch (Exception e) + { + Log.Error("AbortListener OnSuccess() failed: " + e.StackTrace); + } + } + + private sealed class VerifyListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public VerifyListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.verifyRecords = records; + + if (status) + { + command.txn.State = Txn.TxnState.VERIFIED; + command.Commit(); + } + else + { + command.txn.State = Txn.TxnState.ABORTED; + command.RollBack(); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.verifyRecords = records; + command.verifyException = ae; + command.txn.State = Txn.TxnState.ABORTED; + command.RollBack(); + } + }; + + private sealed class RollListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollListener(AsyncTxnRoll command) + { + 
this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnAbort(); + } + else + { + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + }; + + private sealed class MarkRollForwardListener : WriteListener + { + private readonly AsyncTxnRoll command; + + public MarkRollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key) + { + command.txn.State = Txn.TxnState.COMMITTED; + command.txn.InDoubt = false; + command.RollForward(); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae); + } + }; + + private sealed class RollForwardListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(true); + } + else + { + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + }; + + private sealed class RollBackListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollBackListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(false); + } + else + { + 
command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae); + } + }; + + + private sealed class CloseOnCommitListener : DeleteListener + { + private readonly AsyncTxnRoll command; + private readonly bool verified; + + public CloseOnCommitListener(AsyncTxnRoll command, bool verified) + { + this.command = command; + this.verified = verified; + } + + public void OnSuccess(Key key, bool existed) + { + if (verified) + { + command.NotifyCommitSuccess(CommitStatusType.OK); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null); + } + } + + public void OnFailure(AerospikeException ae) + { + if (verified) + { + command.NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, ae); + } + } + }; + private sealed class CloseOnAbortListener : DeleteListener + { + private readonly AsyncTxnRoll command; + + public CloseOnAbortListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key, bool existed) + { + command.NotifyAbortSuccess(AbortStatusType.OK); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + }; + } +} + diff --git a/AerospikeClient/Async/AsyncWrite.cs b/AerospikeClient/Async/AsyncWrite.cs index f95ce6a1..0b2f03d0 100644 --- a/AerospikeClient/Async/AsyncWrite.cs +++ b/AerospikeClient/Async/AsyncWrite.cs @@ -1,127 +1,98 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class AsyncWrite : AsyncSingleCommand - { - private readonly WritePolicy writePolicy; - private readonly WriteListener listener; - private readonly Key key; - private readonly Partition partition; - private readonly Bin[] bins; - private readonly Operation.Type operation; - - public AsyncWrite - ( - AsyncCluster cluster, - WritePolicy writePolicy, - WriteListener listener, - Key key, - Bin[] bins, - Operation.Type operation - ) : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.listener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - this.bins = bins; - this.operation = operation; - cluster.AddTran(); - } - - public AsyncWrite(AsyncWrite other) - : base(other) - { - this.writePolicy = other.writePolicy; - this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; - this.bins = other.bins; - this.operation = other.operation; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncWrite(this); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetWrite(writePolicy, operation, key, bins); - } - - protected internal override void ParseResult() - { - int resultCode = dataBuffer[dataOffset + 
5]; - - if (resultCode == 0) - { - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(key); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncWrite : AsyncWriteBase + { + private readonly WriteListener listener; + private readonly Bin[] bins; + private readonly Operation.Type operation; + + public AsyncWrite + ( + AsyncCluster cluster, + WritePolicy writePolicy, + WriteListener listener, + Key key, + Bin[] bins, + Operation.Type operation + ) : base(cluster, writePolicy, key) + { + this.listener = listener; + this.bins = bins; + this.operation = operation; + } + + public AsyncWrite(AsyncWrite other) + : base(other) + { + this.listener = other.listener; + this.bins = other.bins; + this.operation = other.operation; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncWrite(this); + } + + protected internal override void WriteBuffer() + { + SetWrite(writePolicy, operation, Key, bins); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncWriteBase.cs b/AerospikeClient/Async/AsyncWriteBase.cs new file mode 100644 index 00000000..157ade10 --- /dev/null +++ b/AerospikeClient/Async/AsyncWriteBase.cs @@ -0,0 +1,80 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public abstract class AsyncWriteBase : AsyncSingleCommand + { + protected readonly WritePolicy writePolicy; + public Key Key { get; private set; } + protected readonly Partition partition; + + public AsyncWriteBase + ( + AsyncCluster cluster, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy) + { + this.writePolicy = writePolicy; + this.Key = key; + this.partition = Partition.Write(cluster, policy, key); + cluster.AddCommandCount(); + } + + public AsyncWriteBase(AsyncWriteBase other) + : base(other) + { + this.writePolicy = other.writePolicy; + this.Key = other.Key; + this.partition = other.partition; + } + + protected internal override bool IsWrite() + { + return true; + } + + protected internal override Node GetNode(Cluster cluster) + { + return partition.GetNodeWrite(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.WRITE; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryWrite(timeout); + return true; + } + + protected internal override void OnInDoubt() + { + if (writePolicy.Txn != null) + { + writePolicy.Txn.OnWriteInDoubt(Key); + } + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override bool ParseResult(); + } +} \ No newline at end of file diff --git a/AerospikeClient/Async/IAsyncClient.cs b/AerospikeClient/Async/IAsyncClient.cs index 90153356..0cfd5b5d 100644 --- a/AerospikeClient/Async/IAsyncClient.cs +++ 
b/AerospikeClient/Async/IAsyncClient.cs @@ -14,9 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; namespace Aerospike.Client { @@ -39,7 +36,56 @@ namespace Aerospike.Client /// /// public interface IAsyncClient : IAerospikeClient - { + { + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Asynchronously attempt to commit the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Commit(Txn txn, CancellationToken token); + + /// + /// Asynchronously attempt to commit the given multi-record transaction. First, the expected + /// record versions are sent to the server nodes for verification. If all nodes return success, + /// the transaction is committed. Otherwise, the transaction is aborted. + /// + /// Schedules the commit command with a channel selector and return. + /// Another thread will process the command and send the results to the listener. + /// + /// + /// Requires server version 8.0+ + /// + /// + /// where to send results + /// multi-record transaction + void Commit(CommitListener listener, Txn txn); + + /// + /// Asynchronously attempt to abort and rollback the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Abort(Txn txn, CancellationToken token); + + /// + /// Asynchronously abort and rollback the given multi-record transaction. + /// + /// Schedules the abort command with a channel selector and return. + /// Another thread will process the command and send the results to the listener. 
+ /// + /// Requires server version 8.0+ + /// + /// + /// where to send results + /// multi-record transaction + void Abort(AbortListener listener, Txn txn); + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -48,7 +94,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously write record bin(s). /// Create listener, call asynchronous put and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -64,7 +110,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedules the put command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -83,7 +129,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously append bin string values to existing record bin values. /// Create listener, call asynchronous append and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -100,7 +146,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the append command with a channel selector and return. /// Another thread will process the command and send the results to the listener. 
/// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -116,7 +162,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously prepend bin string values to existing record bin values. /// Create listener, call asynchronous prepend and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -133,7 +179,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the prepend command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -153,7 +199,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously add integer bin values to existing record bin values. /// Create listener, call asynchronous add and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for integer values. /// @@ -170,7 +216,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the add command with a channel selector and return. /// Another thread will process the command and send the results to the listener. 
/// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -189,7 +235,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously delete record for specified key. /// Create listener, call asynchronous delete and return task monitor. /// - /// The policy specifies the transaction timeout. + /// The policy specifies the command timeout. /// /// /// delete configuration parameters, pass in null for defaults @@ -880,7 +926,7 @@ public interface IAsyncClient : IAerospikeClient /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails void Execute(WritePolicy policy, ExecuteListener listener, Key key, string packageName, string functionName, params Value[] functionArgs); /// diff --git a/AerospikeClient/AsyncTask/AbortListenerAdapter.cs b/AerospikeClient/AsyncTask/AbortListenerAdapter.cs new file mode 100644 index 00000000..ce3de111 --- /dev/null +++ b/AerospikeClient/AsyncTask/AbortListenerAdapter.cs @@ -0,0 +1,33 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Client +{ + internal sealed class AbortListenerAdapter : ListenerAdapter, AbortListener + { + public AbortListenerAdapter(CancellationToken token) + : base(token) + { + } + + public void OnSuccess(AbortStatusType status) + { + SetResult(status); + } + } +} diff --git a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs index 6b2616fc..44c295f5 100644 --- a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs +++ b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs @@ -29,7 +29,7 @@ public BatchOperateListListenerAdapter(CancellationToken token) public void OnSuccess(List records, bool status) { // records is an argument to the async call, so the user already has access to it. - // Set completion status: true if all batch sub-transactions were successful. + // Set completion status: true if all batch sub-commands were successful. SetResult(status); } } diff --git a/AerospikeClient/AsyncTask/CommitListenerAdapter.cs b/AerospikeClient/AsyncTask/CommitListenerAdapter.cs new file mode 100644 index 00000000..11c5c46c --- /dev/null +++ b/AerospikeClient/AsyncTask/CommitListenerAdapter.cs @@ -0,0 +1,38 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using static Aerospike.Client.CommitStatus; + +namespace Aerospike.Client +{ + internal sealed class CommitListenerAdapter : ListenerAdapter, CommitListener + { + public CommitListenerAdapter(CancellationToken token) + : base(token) + { + } + + public void OnSuccess(CommitStatusType status) + { + SetResult(status); + } + + public void OnFailure(AerospikeException.Commit exception) + { + base.OnFailure(exception); + } + } +} diff --git a/AerospikeClient/Cluster/Cluster.cs b/AerospikeClient/Cluster/Cluster.cs index c6f35f6a..190c6b78 100644 --- a/AerospikeClient/Cluster/Cluster.cs +++ b/AerospikeClient/Cluster/Cluster.cs @@ -14,10 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System; -using System.Collections.Generic; using System.Text; -using System.Threading; namespace Aerospike.Client { @@ -97,7 +94,7 @@ public class Cluster // Login timeout. protected internal readonly int loginTimeout; - // Maximum socket idle to validate connections in transactions. + // Maximum socket idle to validate connections in commands. private readonly double maxSocketIdleMillisTran; // Maximum socket idle to trim peak connections to min connections. @@ -137,7 +134,7 @@ public class Cluster public MetricsPolicy MetricsPolicy; private volatile IMetricsListener metricsListener; private volatile int retryCount; - private volatile int tranCount; + private volatile int commandCount; private volatile int delayQueueTimeoutCount; public Cluster(ClientPolicy policy, Host[] hosts) @@ -274,6 +271,74 @@ public Cluster(ClientPolicy policy, Host[] hosts) cancelToken = cancel.Token; } + public void StartTendThread(ClientPolicy policy) + { + if (policy.forceSingleNode) + { + // Communicate with the first seed node only. + // Do not run cluster tend thread. 
+ try + { + ForceSingleNode(); + } + catch (Exception) + { + Close(); + throw; + } + } + else + { + InitTendThread(policy.failIfNotConnected); + } + } + + public void ForceSingleNode() + { + // Initialize tendThread, but do not start it. + tendValid = true; + tendThread = new Thread(new ThreadStart(this.Run)); + + // Validate first seed. + Host seed = seeds[0]; + NodeValidator nv = new(); + Node node = null; + + try + { + node = nv.SeedNode(this, seed, null); + } + catch (Exception e) + { + throw new AerospikeException("Seed " + seed + " failed: " + e.Message, e); + } + + node.CreateMinConnections(); + + // Add seed node to nodes. + Dictionary nodesToAdd = new(1); + nodesToAdd[node.Name] = node; + AddNodes(nodesToAdd); + + // Initialize partitionMaps. + Peers peers = new(nodes.Length + 16); + node.RefreshPartitions(peers); + + // Set partition maps for all namespaces to point to same node. + foreach (Partitions partitions in partitionMap.Values) + { + foreach (Node[] nodeArray in partitions.replicas) + { + int max = nodeArray.Length; + + for (int i = 0; i < max; i++) + { + nodeArray[i] = node; + } + } + } + } + public virtual void InitTendThread(bool failIfNotConnected) { // Tend cluster until all nodes identified. @@ -1201,26 +1266,26 @@ private static bool SupportsPartitionQuery(Node[] nodes) } /// - /// Increment transaction count when metrics are enabled. + /// Increment command count when metrics are enabled. /// - public void AddTran() + public void AddCommandCount() { if (MetricsEnabled) { - Interlocked.Increment(ref tranCount); + Interlocked.Increment(ref commandCount); } } /// - /// Return transaction count. The value is cumulative and not reset per metrics interval. + /// Return command count. The value is cumulative and not reset per metrics interval. /// - public int GetTranCount() + public int GetCommandCount() { - return tranCount; + return commandCount; } /// - /// Increment transaction retry count. 
There can be multiple retries for a single transaction. + /// Increment command retry count. There can be multiple retries for a single command. /// public void AddRetry() { @@ -1228,7 +1293,7 @@ public void AddRetry() } /// - /// Add transaction retry count. There can be multiple retries for a single transaction. + /// Add command retry count. There can be multiple retries for a single command. /// public void AddRetries(int count) { @@ -1236,7 +1301,7 @@ public void AddRetries(int count) } /// - /// Return transaction retry count. The value is cumulative and not reset per metrics interval. + /// Return command retry count. The value is cumulative and not reset per metrics interval. /// public int GetRetryCount() { diff --git a/AerospikeClient/Cluster/ClusterStats.cs b/AerospikeClient/Cluster/ClusterStats.cs index 0196fdf3..7e7f855f 100644 --- a/AerospikeClient/Cluster/ClusterStats.cs +++ b/AerospikeClient/Cluster/ClusterStats.cs @@ -51,7 +51,7 @@ public sealed class ClusterStats public readonly int invalidNodeCount; /// - /// Count of transaction retires since cluster was started. + /// Count of command retries since cluster was started. /// public readonly long RetryCount; @@ -126,14 +126,14 @@ public sealed class NodeStats public readonly ConnectionStats asyncStats; /// - /// Transaction error count since node was initialized. If the error is retryable, multiple errors per - /// transaction may occur. + /// Command error count since node was initialized. If the error is retryable, multiple errors per + /// Command may occur. /// public readonly long ErrorCount; /// - /// Transaction timeout count since node was initialized. If the timeout is retryable (ie socketTimeout), - /// multiple timeouts per transaction may occur. + /// Command timeout count since node was initialized. If the timeout is retryable (ie socketTimeout), + /// multiple timeouts per Command may occur. 
/// public readonly long TimeoutCount; diff --git a/AerospikeClient/Cluster/ConnectionRecover.cs b/AerospikeClient/Cluster/ConnectionRecover.cs index 4127e1cc..5d1ffe1e 100644 --- a/AerospikeClient/Cluster/ConnectionRecover.cs +++ b/AerospikeClient/Cluster/ConnectionRecover.cs @@ -284,7 +284,7 @@ private void ParseProto(byte[] buf, int bytesRead) if (compressed) { // Do not recover connections with compressed data because that would - // require saving large buffers with associated state and performing decompression + // require saving large buffers with associated State and performing decompression // just to drain the connection. throw new AerospikeException("Recovering connections with compressed multi-record data is not supported"); } diff --git a/AerospikeClient/Cluster/Node.cs b/AerospikeClient/Cluster/Node.cs index 2ac8712a..d9887d30 100644 --- a/AerospikeClient/Cluster/Node.cs +++ b/AerospikeClient/Cluster/Node.cs @@ -740,7 +740,7 @@ public Connection GetConnection(int timeoutMillis, int timeoutDelay) { if (timeoutDelay > 0) { - // The connection state is always STATE_READ_AUTH_HEADER here which does not reference + // The connection State is always STATE_READ_AUTH_HEADER here which does not reference // isSingle, so just pass in true for isSingle in ConnectionRecover. cluster.RecoverConnection(new ConnectionRecover(conn, this, timeoutDelay, crt, true)); conn = null; @@ -993,8 +993,8 @@ public void ValidateErrorCount() } /// - /// Increment transaction error count. If the error is retryable, multiple errors per - /// transaction may occur. + /// Increment command error count. If the error is retryable, multiple errors per + /// command may occur. /// public void AddError() @@ -1003,8 +1003,8 @@ public void AddError() } /// - /// Increment transaction timeout count. If the timeout is retryable (ie socketTimeout), - /// multiple timeouts per transaction may occur. + /// Increment command timeout count. 
If the timeout is retryable (ie socketTimeout), + /// multiple timeouts per command may occur. /// public void AddTimeout() { @@ -1012,7 +1012,7 @@ public void AddTimeout() } /// - /// Return transaction error count. The value is cumulative and not reset per metrics interval. + /// Return command error count. The value is cumulative and not reset per metrics interval. /// public int GetErrorCount() { @@ -1020,7 +1020,7 @@ public int GetErrorCount() } /// - /// Return transaction timeout count. The value is cumulative and not reset per metrics interval. + /// Return command timeout count. The value is cumulative and not reset per metrics interval. /// public int GetTimeoutCount() { diff --git a/AerospikeClient/Cluster/NodeValidator.cs b/AerospikeClient/Cluster/NodeValidator.cs index cf398ca7..d5599f61 100644 --- a/AerospikeClient/Cluster/NodeValidator.cs +++ b/AerospikeClient/Cluster/NodeValidator.cs @@ -100,6 +100,11 @@ public Node SeedNode(Cluster cluster, Host host, Peers peers) private bool ValidatePeers(Peers peers, Node node) { + if (peers == null) + { + return true; + } + try { peers.refreshCount = 0; diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index 4ecbb5c8..ac7d7b0a 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -1,657 +1,842 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System.Collections; - -namespace Aerospike.Client -{ - //------------------------------------------------------- - // ReadList - //------------------------------------------------------- - - public sealed class BatchReadListCommand : BatchCommand - { - private readonly List records; - - public BatchReadListCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - List records, - BatchStatus status - ) : base(cluster, batch, policy, status, true) - { - this.records = records; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRead record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, false); - status.SetRowError(); - } - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status); - } - } - - //------------------------------------------------------- - // GetArray - //------------------------------------------------------- - - public sealed class BatchGetArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly Record[] records; - private readonly int readAttr; - - public BatchGetArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - Key[] keys, - string[] binNames, - Operation[] ops, 
- Record[] records, - int readAttr, - bool isOperation, - BatchStatus status - ) : base(cluster, batch, policy, status, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.records = records; - this.readAttr = readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(policy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - if (resultCode == 0) - { - records[batchIndex] = ParseRecord(); - } - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); - } - } - - //------------------------------------------------------- - // ExistsArray - //------------------------------------------------------- - - public sealed class BatchExistsArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly bool[] existsArray; - - public BatchExistsArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - Key[] keys, - bool[] existsArray, - BatchStatus status - ) : base(cluster, batch, policy, status, false) - { - this.keys = keys; - this.existsArray = existsArray; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - BatchAttr attr = new BatchAttr(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - SetBatchRead(batchPolicy, keys, 
batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - - existsArray[batchIndex] = resultCode == 0; - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); - } - } - - //------------------------------------------------------- - // OperateList - //------------------------------------------------------- - - public sealed class BatchOperateListCommand : BatchCommand - { - private readonly IList records; - - public BatchOperateListCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - IList records, - BatchStatus status - ) : base(cluster, batch, policy, status, true) - { - this.records = records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, (IList)records, batch); - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return true; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - status.SetRowError(); - return true; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - status.SetRowError(); - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = record.hasWrite; - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status); - } - } - - //------------------------------------------------------- - // OperateArray - //------------------------------------------------------- - - public sealed class BatchOperateArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly Operation[] ops; - private readonly BatchRecord[] records; - private readonly BatchAttr attr; - - public BatchOperateArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecord[] records, - BatchAttr attr, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, ops != null) - { - this.keys = keys; - this.ops = ops; - this.records = records; - this.attr = attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if 
(resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - status.SetRowError(); - } - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); - } - } - - //------------------------------------------------------- - // UDF - //------------------------------------------------------- - - public sealed class BatchUDFCommand : BatchCommand - { - private readonly Key[] keys; - private readonly string packageName; - private readonly string functionName; - private readonly byte[] argBytes; - private readonly BatchRecord[] records; - private readonly BatchAttr attr; - - public BatchUDFCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchRecord[] records, - BatchAttr attr, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.records = records; - this.attr = attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - 
protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return true; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. - record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); - status.SetRowError(); - return true; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - status.SetRowError(); - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); - } - } - - //------------------------------------------------------- - // Batch Base Command - //------------------------------------------------------- - - public abstract class BatchCommand : MultiCommand - { - internal readonly BatchNode batch; - internal readonly BatchPolicy batchPolicy; - internal readonly BatchStatus status; - internal BatchExecutor parent; - internal uint sequenceAP; - internal uint sequenceSC; - internal bool splitRetry; - - public BatchCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchStatus status, - 
bool isOperation - ) : base(cluster, batchPolicy, batch.node, isOperation) - { - this.batch = batch; - this.batchPolicy = batchPolicy; - this.status = status; - } - - public void Run(object obj) - { - try - { - Execute(); - } - catch (AerospikeException ae) - { - // Set error/inDoubt for keys associated this batch command when - // the command was not retried and split. If a split retry occurred, - // those new subcommands have already set error/inDoubt on the affected - // subset of keys. - if (!splitRetry) - { - SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - } - catch (Exception e) - { - if (!splitRetry) - { - SetInDoubt(true); - } - status.SetException(e); - } - finally - { - parent.OnComplete(); - } - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.BATCH; - } - - protected internal override bool PrepareRetry(bool timeout) - { - if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) && - (parent == null || !parent.IsDone()))) - { - // Perform regular retry to same node. - return true; - } - sequenceAP++; - - if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE) - { - sequenceSC++; - } - return false; - } - - protected internal override bool RetryBatch - ( - Cluster cluster, - int socketTimeout, - int totalTimeout, - DateTime deadline, - int iteration, - int commandSentCounter - ) - { - // Retry requires keys for this node to be split among other nodes. - // This is both recursive and exponential. - List batchNodes = GenerateBatchNodes(); - - if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) - { - // Batch node is the same. Go through normal retry. - return false; - } - - splitRetry = true; - - // Run batch requests sequentially in same thread. 
- foreach (BatchNode batchNode in batchNodes) - { - BatchCommand command = CreateCommand(batchNode); - command.parent = parent; - command.sequenceAP = sequenceAP; - command.sequenceSC = sequenceSC; - command.socketTimeout = socketTimeout; - command.totalTimeout = totalTimeout; - command.iteration = iteration; - command.commandSentCounter = commandSentCounter; - command.deadline = deadline; - - try - { - cluster.AddRetry(); - command.ExecuteCommand(); - } - catch (AerospikeException ae) - { - if (!command.splitRetry) - { - command.SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - - if (!batchPolicy.respondAllKeys) - { - throw; - } - } - catch (Exception e) - { - if (!command.splitRetry) - { - command.SetInDoubt(true); - } - status.SetException(e); - - if (!batchPolicy.respondAllKeys) - { - throw; - } - } - } - return true; - } - - protected internal virtual void SetInDoubt(bool inDoubt) - { - // Do nothing by default. Batch writes will override this method. - } - - protected internal abstract BatchCommand CreateCommand(BatchNode batchNode); - protected internal abstract List GenerateBatchNodes(); - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using System.Collections; + +namespace Aerospike.Client +{ + //------------------------------------------------------- + // ReadList + //------------------------------------------------------- + + public sealed class BatchReadListCommand : BatchCommand + { + private readonly List records; + + public BatchReadListCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + List records, + BatchStatus status + ) : base(cluster, batch, policy, status, true) + { + this.records = records; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override bool ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + status.SetRowError(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status); + } + } + + //------------------------------------------------------- + // GetArray + //------------------------------------------------------- + + public sealed class BatchGetArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly Record[] records; + private readonly int readAttr; + + public BatchGetArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + Key[] keys, + string[] binNames, + Operation[] ops, + Record[] records, + int readAttr, + bool isOperation, + BatchStatus status + ) : base(cluster, 
batch, policy, status, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.records = records; + this.readAttr = readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + BatchAttr attr = new(policy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override bool ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + + if (resultCode == 0) + { + records[batchIndex] = ParseRecord(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); + } + } + + //------------------------------------------------------- + // ExistsArray + //------------------------------------------------------- + + public sealed class BatchExistsArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly bool[] existsArray; + + public BatchExistsArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + Key[] keys, + bool[] existsArray, + BatchStatus status + ) : base(cluster, batch, policy, status, false) + { + this.keys = keys; + this.existsArray = existsArray; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + BatchAttr attr = new(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal 
override bool ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + existsArray[batchIndex] = resultCode == 0; + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); + } + } + + //------------------------------------------------------- + // OperateList + //------------------------------------------------------- + + public sealed class BatchOperateListCommand : BatchCommand + { + private readonly IList records; + + public BatchOperateListCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + IList records, + BatchStatus status + ) : base(cluster, batch, policy, status, true) + { + this.records = records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, (IList)records, batch); + } + + protected internal override bool ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return true; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + status.SetRowError(); + return true; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + status.SetRowError(); + return true; + } + + protected internal override void InDoubt() + { + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status); + } + } + + //------------------------------------------------------- + // OperateArray + //------------------------------------------------------- + + public sealed class BatchOperateArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly Operation[] ops; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchOperateArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, ops != null) + { + this.keys = keys; + this.ops = ops; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); + } + + protected internal override bool ParseRow() + { + BatchRecord record = 
records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + } + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + if (policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // UDF + //------------------------------------------------------- + + public sealed class BatchUDFCommand : BatchCommand + { + private readonly Key[] keys; + private readonly string packageName; + private readonly string functionName; + private readonly byte[] argBytes; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchUDFCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() 
+ { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override bool ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return true; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. + record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); + status.SetRowError(); + return true; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + if (policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class BatchTxnVerify : BatchCommand + { + private readonly Key[] keys; + private readonly long?[] versions; + private readonly BatchRecord[] records; + + public BatchTxnVerify( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + long?[] 
versions, + BatchRecord[] records, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.keys = keys; + this.versions = versions; + this.records = records; + } + + protected internal override bool IsWrite() + { + return false; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + status.SetRowError(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTxnVerify(cluster, batchNode, batchPolicy, keys, versions, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status); + } + } + + public sealed class BatchTxnRoll : BatchCommand + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchTxnRoll( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, 
Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + } + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTxnRoll(cluster, batchNode, batchPolicy, txn, keys, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // Batch Base Command + //------------------------------------------------------- + + public abstract class BatchCommand : MultiCommand + { + internal readonly BatchNode batch; + internal readonly BatchPolicy batchPolicy; + internal readonly BatchStatus status; + internal BatchExecutor parent; + internal uint sequenceAP; + internal uint sequenceSC; + internal bool splitRetry; + + public BatchCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchStatus status, + bool isOperation + ) : base(cluster, batchPolicy, batch.node, isOperation) + { + this.batch = batch; + this.batchPolicy = batchPolicy; + this.status = status; + } + + public void Run(object obj) + { + try + { + Execute(); + } + catch (AerospikeException ae) + { + if (ae.InDoubt) + { + SetInDoubt(); + } + status.SetException(ae); + } + catch (Exception e) + { + SetInDoubt(); + status.SetException(e); + } + finally + { + parent.OnComplete(); + } + } + + protected void ParseFieldsRead(Key key) + { + if (policy.Txn != null) + { + long? 
version = ParseVersion(fieldCount); + policy.Txn.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(BatchRecord br) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + + if (br.hasWrite) + { + policy.Txn.OnWrite(br.key, version, resultCode); + } + else + { + policy.Txn.OnRead(br.key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.BATCH; + } + + protected internal override bool PrepareRetry(bool timeout) + { + if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) && + (parent == null || !parent.IsDone()))) + { + // Perform regular retry to same node. + return true; + } + sequenceAP++; + + if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE) + { + sequenceSC++; + } + return false; + } + + protected internal override bool RetryBatch + ( + Cluster cluster, + int socketTimeout, + int totalTimeout, + DateTime deadline, + int iteration, + int commandSentCounter + ) + { + // Retry requires keys for this node to be split among other nodes. + // This is both recursive and exponential. + List batchNodes = GenerateBatchNodes(); + + if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) + { + // Batch node is the same. Go through normal retry. + return false; + } + + splitRetry = true; + + // Run batch requests sequentially in same thread. 
+ foreach (BatchNode batchNode in batchNodes) + { + BatchCommand command = CreateCommand(batchNode); + command.parent = parent; + command.sequenceAP = sequenceAP; + command.sequenceSC = sequenceSC; + command.socketTimeout = socketTimeout; + command.totalTimeout = totalTimeout; + command.iteration = iteration; + command.commandSentCounter = commandSentCounter; + command.deadline = deadline; + + try + { + cluster.AddRetry(); + command.ExecuteCommand(); + } + catch (AerospikeException ae) + { + if (ae.InDoubt) + { + SetInDoubt(); + } + status.SetException(ae); + + if (!batchPolicy.respondAllKeys) + { + throw; + } + } + catch (Exception e) + { + if (!command.splitRetry) + { + SetInDoubt(); + } + status.SetException(e); + + if (!batchPolicy.respondAllKeys) + { + throw; + } + } + } + return true; + } + + protected internal void SetInDoubt() + { + // Set error/inDoubt for keys associated this batch command when + // the command was not retried and split. If a split retry occurred, + // those new subcommands have already set inDoubt on the affected + // subset of keys. + if (!splitRetry) + { + InDoubt(); + } + } + + protected internal virtual void InDoubt() + { + // Do nothing by default. Batch writes will override this method. 
+ } + + protected internal abstract BatchCommand CreateCommand(BatchNode batchNode); + protected internal abstract List GenerateBatchNodes(); + } +} diff --git a/AerospikeClient/Command/BatchAttr.cs b/AerospikeClient/Command/BatchAttr.cs index 65c13b13..b58f03bb 100644 --- a/AerospikeClient/Command/BatchAttr.cs +++ b/AerospikeClient/Command/BatchAttr.cs @@ -22,7 +22,9 @@ public sealed class BatchAttr public int readAttr; public int writeAttr; public int infoAttr; + public int txnAttr; public int expiration; + public int opSize; public short generation; public bool hasWrite; public bool sendKey; @@ -394,5 +396,30 @@ public void SetDelete(BatchDeletePolicy dp) infoAttr |= Command.INFO3_COMMIT_MASTER; } } + + public void SetOpSize(Operation[] ops) + { + int dataOffset = 0; + + foreach (Operation op in ops) + { + dataOffset += ByteUtil.EstimateSizeUtf8(op.binName) + Command.OPERATION_HEADER_SIZE; + dataOffset += op.value.EstimateSize(); + } + opSize = dataOffset; + } + + public void SetTxn(int attr) + { + filterExp = null; + readAttr = 0; + writeAttr = Command.INFO2_WRITE | Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE; + infoAttr = 0; + txnAttr = attr; + expiration = 0; + generation = 0; + hasWrite = true; + sendKey = false; + } } } diff --git a/AerospikeClient/Command/BatchExecutor.cs b/AerospikeClient/Command/BatchExecutor.cs index 7fc82ac4..cd508292 100644 --- a/AerospikeClient/Command/BatchExecutor.cs +++ b/AerospikeClient/Command/BatchExecutor.cs @@ -1,167 +1,170 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System; -using System.Threading; - -namespace Aerospike.Client -{ - public sealed class BatchExecutor - { - public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) - { - cluster.AddTran(); - - if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) - { - // Run batch requests sequentially in same thread. - foreach (BatchCommand command in commands) - { - try - { - command.Execute(); - } - catch (AerospikeException ae) - { - // Set error/inDoubt for keys associated this batch command when - // the command was not retried and split. If a split retry occurred, - // those new subcommands have already set error/inDoubt on the affected - // subset of keys. - if (!command.splitRetry) - { - command.SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - - if (!policy.respondAllKeys) - { - throw; - } - } - catch (Exception e) - { - if (!command.splitRetry) - { - command.SetInDoubt(true); - } - status.SetException(e); - - if (!policy.respondAllKeys) - { - throw; - } - } - } - status.CheckException(); - return; - } - - // Run batch requests in parallel in separate threads. 
- BatchExecutor executor = new BatchExecutor(policy, commands, status); - executor.Execute(); - } - - public static void Execute(BatchCommand command, BatchStatus status) - { - command.Execute(); - status.CheckException(); - } - - private readonly BatchStatus status; - private readonly int maxConcurrentThreads; - private readonly BatchCommand[] commands; - private int completedCount; - private volatile int done; - private bool completed; - - private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status) - { - this.commands = commands; - this.status = status; - this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads; - } - - internal void Execute() - { - // Start threads. - for (int i = 0; i < maxConcurrentThreads; i++) - { - BatchCommand cmd = commands[i]; - cmd.parent = this; - ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); - } - - // Multiple threads write to the batch record array/list, so one might think that memory barriers - // are needed. That should not be necessary because of this synchronized waitTillComplete(). - WaitTillComplete(); - - // Throw an exception if an error occurred. - status.CheckException(); - } - - internal void OnComplete() - { - int finished = Interlocked.Increment(ref completedCount); - - if (finished < commands.Length) - { - int nextThread = finished + maxConcurrentThreads - 1; - - // Determine if a new thread needs to be started. - if (nextThread < commands.Length && done == 0) - { - // Start new thread. - BatchCommand cmd = commands[nextThread]; - cmd.parent = this; - ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); - } - } - else - { - // Ensure executor succeeds or fails exactly once. 
- if (Interlocked.Exchange(ref done, 1) == 0) - { - NotifyCompleted(); - } - } - } - - internal bool IsDone() - { - return done != 0; - } - - private void WaitTillComplete() - { - lock (this) - { - while (!completed) - { - Monitor.Wait(this); - } - } - } - - private void NotifyCompleted() - { - lock (this) - { - completed = true; - Monitor.Pulse(this); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System; +using System.Threading; + +namespace Aerospike.Client +{ + public sealed class BatchExecutor + { + public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) + { + cluster.AddCommandCount(); + + if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) + { + // Run batch requests sequentially in same thread. + foreach (BatchCommand command in commands) + { + try + { + command.Execute(); + } + catch (AerospikeException ae) + { + // Set error/inDoubt for keys associated this batch command when + // the command was not retried and split. If a split retry occurred, + // those new subcommands have already set error/inDoubt on the affected + // subset of keys. 
+ if (!command.splitRetry) + { + if (ae.InDoubt) + { + command.SetInDoubt(); + } + } + status.SetException(ae); + + if (!policy.respondAllKeys) + { + throw; + } + } + catch (Exception e) + { + if (!command.splitRetry) + { + command.SetInDoubt(); + } + status.SetException(e); + + if (!policy.respondAllKeys) + { + throw; + } + } + } + status.CheckException(); + return; + } + + // Run batch requests in parallel in separate threads. + BatchExecutor executor = new BatchExecutor(policy, commands, status); + executor.Execute(); + } + + public static void Execute(BatchCommand command, BatchStatus status) + { + command.Execute(); + status.CheckException(); + } + + private readonly BatchStatus status; + private readonly int maxConcurrentThreads; + private readonly BatchCommand[] commands; + private int completedCount; + private volatile int done; + private bool completed; + + private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status) + { + this.commands = commands; + this.status = status; + this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads; + } + + internal void Execute() + { + // Start threads. + for (int i = 0; i < maxConcurrentThreads; i++) + { + BatchCommand cmd = commands[i]; + cmd.parent = this; + ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); + } + + // Multiple threads write to the batch record array/list, so one might think that memory barriers + // are needed. That should not be necessary because of this synchronized waitTillComplete(). + WaitTillComplete(); + + // Throw an exception if an error occurred. + status.CheckException(); + } + + internal void OnComplete() + { + int finished = Interlocked.Increment(ref completedCount); + + if (finished < commands.Length) + { + int nextThread = finished + maxConcurrentThreads - 1; + + // Determine if a new thread needs to be started. 
+ if (nextThread < commands.Length && done == 0) + { + // Start new thread. + BatchCommand cmd = commands[nextThread]; + cmd.parent = this; + ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); + } + } + else + { + // Ensure executor succeeds or fails exactly once. + if (Interlocked.Exchange(ref done, 1) == 0) + { + NotifyCompleted(); + } + } + } + + internal bool IsDone() + { + return done != 0; + } + + private void WaitTillComplete() + { + lock (this) + { + while (!completed) + { + Monitor.Wait(this); + } + } + } + + private void NotifyCompleted() + { + lock (this) + { + completed = true; + Monitor.Pulse(this); + } + } + } +} diff --git a/AerospikeClient/Command/ByteUtil.cs b/AerospikeClient/Command/ByteUtil.cs index fbb2a2e5..456fcd1d 100644 --- a/AerospikeClient/Command/ByteUtil.cs +++ b/AerospikeClient/Command/ByteUtil.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -41,7 +41,7 @@ public static Value BytesToKeyValue(ParticleType type, byte[] buf, int offset, i case ParticleType.BLOB: byte[] dest = new byte[len]; Array.Copy(buf, offset, dest, 0, len); - return Value.Get(dest); + return Value.Get(dest); default: return null; @@ -424,15 +424,52 @@ public static long LittleBytesToLong(byte[] buf, int offset) ); } - //------------------------------------------------------- - // 32 bit number conversions. - //------------------------------------------------------- - - /// - /// Convert int to big endian 32 bits. - /// The bit pattern will be the same regardless of sign. + //------------------------------------------------------- + // Transaction version conversions. + //------------------------------------------------------- + + /// + /// Convert long to a 7 byte record version for MRT. 
+ /// + public static void LongToVersionBytes(long v, byte[] buf, int offset) + { + buf[offset++] = (byte)(v >> 0); + buf[offset++] = (byte)(v >> 8); + buf[offset++] = (byte)(v >> 16); + buf[offset++] = (byte)(v >> 24); + buf[offset++] = (byte)(v >> 32); + buf[offset++] = (byte)(v >> 40); + buf[offset] = (byte)(v >> 48); + } + + /// + /// Convert 7 byte record version to a long for MRT. /// - public static int IntToBytes(uint v, byte[] buf, int offset) + /// + /// + /// + public static long VersionBytesToLong(byte[] buf, int offset) + { + return ( + ((long)(buf[offset] & 0xFF) << 0) | + ((long)(buf[offset + 1] & 0xFF) << 8) | + ((long)(buf[offset + 2] & 0xFF) << 16) | + ((long)(buf[offset + 3] & 0xFF) << 24) | + ((long)(buf[offset + 4] & 0xFF) << 32) | + ((long)(buf[offset + 5] & 0xFF) << 40) | + ((long)(buf[offset + 6] & 0xFF) << 48) + ); + } + + //------------------------------------------------------- + // 32 bit number conversions. + //------------------------------------------------------- + + /// + /// Convert int to big endian 32 bits. + /// The bit pattern will be the same regardless of sign. + /// + public static int IntToBytes(uint v, byte[] buf, int offset) { // Benchmarks show that custom conversion is faster than System.BitConverter.GetBytes(). // Assume little endian machine and reverse/convert in one pass. diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index 0f0fe138..7c38a9e8 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -1,2362 +1,3311 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System.Collections; - -#pragma warning disable 0618 - -namespace Aerospike.Client -{ - public abstract class Command - { - public static readonly int INFO1_READ = (1 << 0); // Contains a read operation. - public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins. - public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query. - public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists. - public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins. - public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation. - public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress it's response. - - public static readonly int INFO2_WRITE = (1 << 0); // Create or update record - public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. - public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. - public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. - public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Transaction resulting in record deletion leaves tombstone (Enterprise only). - public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. 
- public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relac read consistency - public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. - - public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message. - public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success. - // On send: Do not return partition done in scan/query. - // On receive: Specified partition is done in scan/query. - public static readonly int INFO3_PARTITION_DONE = (1 << 2); - public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins. - public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record. - public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only. - public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below. - public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below. - - // Interpret SC_READ bits in info3. 
- // - // RELAX TYPE - // strict - // ------ - // 0 0 sequential (default) - // 0 1 linearize - // - // relaxed - // ------- - // 1 0 allow replica - // 1 1 allow unavailable - - public const byte STATE_READ_AUTH_HEADER = 1; - public const byte STATE_READ_HEADER = 2; - public const byte STATE_READ_DETAIL = 3; - public const byte STATE_COMPLETE = 4; - - public const byte BATCH_MSG_READ = 0x0; - public const byte BATCH_MSG_REPEAT = 0x1; - public const byte BATCH_MSG_INFO = 0x2; - public const byte BATCH_MSG_GEN = 0x4; - public const byte BATCH_MSG_TTL = 0x8; - - public const int MSG_TOTAL_HEADER_SIZE = 30; - public const int FIELD_HEADER_SIZE = 5; - public const int OPERATION_HEADER_SIZE = 8; - public const int MSG_REMAINING_HEADER_SIZE = 22; - public const int DIGEST_SIZE = 20; - public const int COMPRESS_THRESHOLD = 128; - public const ulong CL_MSG_VERSION = 2UL; - public const ulong AS_MSG_TYPE = 3UL; - public const ulong MSG_TYPE_COMPRESSED = 4UL; - - internal byte[] dataBuffer; - internal int dataOffset; - internal readonly int maxRetries; - internal readonly int serverTimeout; - internal int socketTimeout; - internal int totalTimeout; - - public Command(int socketTimeout, int totalTimeout, int maxRetries) - { - this.maxRetries = maxRetries; - this.totalTimeout = totalTimeout; - - if (totalTimeout > 0) - { - this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? 
socketTimeout : totalTimeout; - this.serverTimeout = this.socketTimeout; - } - else - { - this.socketTimeout = socketTimeout; - this.serverTimeout = 0; - } - } - - //-------------------------------------------------- - // Writes - //-------------------------------------------------- - - public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - foreach (Bin bin in bins) - { - EstimateOperationSize(bin); - } - - bool compress = SizeBuffer(policy); - - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - foreach (Bin bin in bins) - { - WriteOperation(bin, operation); - } - End(compress); - } - - public virtual void SetDelete(WritePolicy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - End(); - } - - public virtual void SetTouch(WritePolicy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - EstimateOperationSize(); - SizeBuffer(); - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - WriteOperation(Operation.Type.TOUCH); - End(); - } - - //-------------------------------------------------- - // Reads - //-------------------------------------------------- - - public virtual void 
SetExists(Policy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - End(); - } - - public virtual void SetRead(Policy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ | Command.INFO1_GET_ALL, 0, 0, fieldCount, 0); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - End(); - } - - public virtual void SetRead(Policy policy, Key key, string[] binNames) - { - if (binNames != null) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - SizeBuffer(); - WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ, 0, 0, fieldCount, binNames.Length); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - End(); - } - else - { - SetRead(policy, key); - } - } - - public virtual void SetReadHeader(Policy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - EstimateOperationSize((string)null); - SizeBuffer(); - WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - WriteKey(policy, key); - - if 
(policy.filterExp != null) - { - policy.filterExp.Write(this); - } - End(); - } - - //-------------------------------------------------- - // Operate - //-------------------------------------------------- - - public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - dataOffset += args.size; - - bool compress = SizeBuffer(policy); - - WriteHeaderReadWrite(policy, args, fieldCount); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - foreach (Operation operation in args.operations) - { - WriteOperation(operation); - } - End(compress); - } - - //-------------------------------------------------- - // UDF - //-------------------------------------------------- - - public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - byte[] argBytes = Packer.Pack(args); - fieldCount += EstimateUdfSize(packageName, functionName, argBytes); - - bool compress = SizeBuffer(policy); - - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); - WriteKey(policy, key); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - WriteField(packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(functionName, FieldType.UDF_FUNCTION); - WriteField(argBytes, FieldType.UDF_ARGLIST); - End(compress); - } - - //-------------------------------------------------- - // Batch Read Only - //-------------------------------------------------- - - public virtual void SetBatchRead(BatchPolicy policy, List records, BatchNode batch) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - BatchRead 
prev = null; - - Begin(); - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - for (int i = 0; i < max; i++) - { - BatchRead record = records[offsets[i]]; - Key key = record.key; - string[] binNames = record.binNames; - Operation[] ops = record.ops; - - dataOffset += key.digest.Length + 4; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && - prev.binNames == binNames && prev.readAllBins == record.readAllBins && - prev.ops == ops) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. 
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - EstimateReadOperationSize(op); - } - } - prev = record; - } - } - - bool compress = SizeBuffer(policy); - - int readAttr = Command.INFO1_READ; - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - BatchRead record = records[index]; - Key key = record.key; - string[] binNames = record.binNames; - Operation[] ops = record.ops; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && - prev.binNames == binNames && prev.readAllBins == record.readAllBins && - prev.ops == ops) - { - // Can set repeat previous namespace/bin names to save space. 
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full header, namespace and bin names. - dataBuffer[dataOffset++] = BATCH_MSG_READ; - - if (binNames != null && binNames.Length != 0) - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, binNames.Length); - - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - else if (ops != null) - { - int offset = dataOffset++; - WriteBatchFields(key, 0, ops.Length); - dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); - } - else - { - dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA)); - WriteBatchFields(key, 0, 0); - } - prev = record; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchRead - ( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string[] binNames, - Operation[] ops, - int readAttr - ) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - - // Estimate dataBuffer size. - Begin(); - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - dataOffset += FIELD_HEADER_SIZE + 5; - - Key prev = null; - - for (int i = 0; i < max; i++) - { - Key key = keys[offsets[i]]; - - dataOffset += key.digest.Length + 4; - - // Try reference equality in hope that namespace for all keys is set from a fixed variable. - if (prev != null && prev.ns == key.ns && prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. 
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (binNames != null) - { - foreach (String binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - EstimateReadOperationSize(op); - } - } - prev = key; - } - } - - bool compress = SizeBuffer(policy); - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - Key key = keys[index]; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Try reference equality in hope that namespace for all keys is set from a fixed variable. - if (prev != null && prev.ns == key.ns && prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full header, namespace and bin names. 
- dataBuffer[dataOffset++] = BATCH_MSG_READ; - - if (binNames != null && binNames.Length != 0) - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, binNames.Length); - - foreach (String binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - else if (ops != null) - { - int offset = dataOffset++; - WriteBatchFields(key, 0, ops.Length); - dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); - } - else - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, 0); - } - prev = key; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - //-------------------------------------------------- - // Batch Read/Write Operations - //-------------------------------------------------- - - public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode batch) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - BatchRecord prev = null; - - Begin(); - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - for (int i = 0; i < max; i++) - { - BatchRecord record = (BatchRecord)records[offsets[i]]; - Key key = record.key; - - dataOffset += key.digest.Length + 4; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (!policy.sendKey && prev != null && prev.key.ns == key.ns && - prev.key.setName == key.setName && record.Equals(prev)) - { - // Can set repeat previous namespace/bin names to save space. 
- dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. - dataOffset += 12; - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - dataOffset += record.Size(policy); - prev = record; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - - BatchAttr attr = new BatchAttr(); - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - BatchRecord record = (BatchRecord)records[index]; - Key key = record.key; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (!policy.sendKey && prev != null && prev.key.ns == key.ns && - prev.key.setName == key.setName && record.Equals(prev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. 
- switch (record.GetBatchType()) - { - case BatchRecord.Type.BATCH_READ: - { - BatchRead br = (BatchRead)record; - - if (br.policy != null) - { - attr.SetRead(br.policy); - } - else - { - attr.SetRead(policy); - } - - if (br.binNames != null) - { - WriteBatchBinNames(key, br.binNames, attr, attr.filterExp); - } - else if (br.ops != null) - { - attr.AdjustRead(br.ops); - WriteBatchOperations(key, br.ops, attr, attr.filterExp); - } - else - { - attr.AdjustRead(br.readAllBins); - WriteBatchRead(key, attr, attr.filterExp, 0); - } - break; - } - - case BatchRecord.Type.BATCH_WRITE: - { - BatchWrite bw = (BatchWrite)record; - - if (bw.policy != null) - { - attr.SetWrite(bw.policy); - } - else - { - attr.SetWrite(policy); - } - attr.AdjustWrite(bw.ops); - WriteBatchOperations(key, bw.ops, attr, attr.filterExp); - break; - } - - case BatchRecord.Type.BATCH_UDF: - { - BatchUDF bu = (BatchUDF)record; - - if (bu.policy != null) - { - attr.SetUDF(bu.policy); - } - else - { - attr.SetUDF(policy); - } - WriteBatchWrite(key, attr, attr.filterExp, 3, 0); - WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(bu.functionName, FieldType.UDF_FUNCTION); - WriteField(bu.argBytes, FieldType.UDF_ARGLIST); - break; - } - - case BatchRecord.Type.BATCH_DELETE: - { - BatchDelete bd = (BatchDelete)record; - - if (bd.policy != null) - { - attr.SetDelete(bd.policy); - } - else - { - attr.SetDelete(policy); - } - WriteBatchWrite(key, attr, attr.filterExp, 0, 0); - break; - } - } - prev = record; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchOperate - ( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string[] binNames, - Operation[] ops, - BatchAttr attr - ) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - - // Estimate dataBuffer size. 
- Begin(); - int fieldCount = 1; - Expression exp = GetBatchExpression(policy, attr); - - if (exp != null) - { - dataOffset += exp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - Key prev = null; - - for (int i = 0; i < max; i++) - { - Key key = keys[offsets[i]]; - - dataOffset += key.digest.Length + 4; - - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. - dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (attr.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - } - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - if (Operation.IsWrite(op.type)) - { - if (!attr.hasWrite) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); - } - dataOffset += 2; // Extra write specific fields. - } - EstimateOperationSize(op); - } - } - else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) - { - dataOffset += 2; // Extra write specific fields. 
- } - prev = key; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - if (exp != null) - { - exp.Write(this); - } - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - Key key = keys[index]; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. - if (binNames != null) - { - WriteBatchBinNames(key, binNames, attr, null); - } - else if (ops != null) - { - WriteBatchOperations(key, ops, attr, null); - } - else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) - { - WriteBatchWrite(key, attr, null, 0, 0); - } - else - { - WriteBatchRead(key, attr, null, 0); - } - prev = key; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchUDF - ( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr - ) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - - // Estimate dataBuffer size. 
- Begin(); - int fieldCount = 1; - Expression exp = GetBatchExpression(policy, attr); - - if (exp != null) - { - dataOffset += exp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - Key prev = null; - - for (int i = 0; i < max; i++) - { - Key key = keys[offsets[i]]; - - dataOffset += key.digest.Length + 4; - - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. - dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (attr.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - } - dataOffset += 2; // gen(2) = 6 - EstimateUdfSize(packageName, functionName, argBytes); - prev = key; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - if (exp != null) - { - exp.Write(this); - } - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - Key key = keys[index]; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. 
- if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. - WriteBatchWrite(key, attr, null, 3, 0); - WriteField(packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(functionName, FieldType.UDF_FUNCTION); - WriteField(argBytes, FieldType.UDF_ARGLIST); - prev = key; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - private static Expression GetBatchExpression(Policy policy, BatchAttr attr) - { - return (attr.filterExp != null) ? attr.filterExp : policy.filterExp; - } - - private static byte GetBatchFlags(BatchPolicy policy) - { - byte flags = 0x8; - - if (policy.allowInline) - { - flags |= 0x1; - } - - if (policy.allowInlineSSD) - { - flags |= 0x2; - } - - if (policy.respondAllKeys) - { - flags |= 0x4; - } - return flags; - } - - private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) - { - int readAttr = Command.INFO1_BATCH; - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - // Write all header data except total size which must be written last. - dataOffset += 8; - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
- dataBuffer[dataOffset++] = (byte)readAttr; - - Array.Clear(dataBuffer, dataOffset, 12); - dataOffset += 12; - - dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); - } - - private void WriteBatchBinNames(Key key, string[] binNames, BatchAttr attr, Expression filter) - { - WriteBatchRead(key, attr, filter, binNames.Length); - - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - - private void WriteBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expression filter) - { - if (attr.hasWrite) - { - WriteBatchWrite(key, attr, filter, 0, ops.Length); - } - else - { - WriteBatchRead(key, attr, filter, ops.Length); - } - - foreach (Operation op in ops) - { - WriteOperation(op); - } - } - - private void WriteBatchRead(Key key, BatchAttr attr, Expression filter, int opCount) - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - WriteBatchFields(key, filter, 0, opCount); - } - - private void WriteBatchWrite(Key key, BatchAttr attr, Expression filter, int fieldCount, int opCount) - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataOffset += ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - - if (attr.sendKey) - { - fieldCount++; - WriteBatchFields(key, filter, fieldCount, opCount); - 
WriteField(key.userKey, FieldType.KEY); - } - else - { - WriteBatchFields(key, filter, fieldCount, opCount); - } - } - - private void WriteBatchFields(Key key, Expression filter, int fieldCount, int opCount) - { - if (filter != null) - { - fieldCount++; - WriteBatchFields(key, fieldCount, opCount); - filter.Write(this); - } - else - { - WriteBatchFields(key, fieldCount, opCount); - } - } - - private void WriteBatchFields(Key key, int fieldCount, int opCount) - { - fieldCount += 2; - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); - WriteField(key.ns, FieldType.NAMESPACE); - WriteField(key.setName, FieldType.TABLE); - } - - //-------------------------------------------------- - // Scan - //-------------------------------------------------- - - public virtual void SetScan - ( - Cluster cluster, - ScanPolicy policy, - string ns, - string setName, - string[] binNames, - ulong taskId, - NodePartitions nodePartitions - ) - { - Begin(); - int fieldCount = 0; - int partsFullSize = nodePartitions.partsFull.Count * 2; - int partsPartialSize = nodePartitions.partsPartial.Count * 20; - long maxRecords = nodePartitions.recordMax; - - if (ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (setName != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsFullSize > 0) - { - dataOffset += partsFullSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialSize > 0) - { - dataOffset += partsPartialSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (maxRecords > 0) - { - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (policy.recordsPerSecond > 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - // Estimate scan 
timeout size. - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - - // Estimate taskId size. - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - - SizeBuffer(); - int readAttr = Command.INFO1_READ; - - if (!policy.includeBinData) - { - readAttr |= Command.INFO1_NOBINDATA; - } - - // Clusters that support partition queries also support not sending partition done messages. - int operationCount = (binNames == null) ? 0 : binNames.Length; - WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount); - - if (ns != null) - { - WriteField(ns, FieldType.NAMESPACE); - } - - if (setName != null) - { - WriteField(setName, FieldType.TABLE); - } - - if (partsFullSize > 0) - { - WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsFull) - { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; - } - } - - if (partsPartialSize > 0) - { - WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) { - Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); - dataOffset += 20; - } - } - - if (maxRecords > 0) - { - WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); - } - - if (policy.recordsPerSecond > 0) - { - WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND); - } - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - // Write scan timeout - WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); - - // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); - - if (binNames != null) - { - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - End(); - } - - //-------------------------------------------------- - // Query - 
//-------------------------------------------------- - - protected virtual internal void SetQuery - ( - Cluster cluster, - Policy policy, - Statement statement, - ulong taskId, - bool background, - NodePartitions nodePartitions - ) - { - byte[] functionArgBuffer = null; - int fieldCount = 0; - int filterSize = 0; - int binNameSize = 0; - bool isNew = cluster.hasPartitionQuery; - - Begin(); - - if (statement.ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (statement.setName != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate recordsPerSecond field size. This field is used in new servers and not used - // (but harmless to add) in old servers. - if (statement.recordsPerSecond > 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate socket timeout field size. This field is used in new servers and not used - // (but harmless to add) in old servers. - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - - // Estimate taskId field. - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - - byte[] packedCtx = null; - - if (statement.filter != null) - { - IndexCollectionType type = statement.filter.CollectionType; - - // Estimate INDEX_TYPE field. - if (type != IndexCollectionType.DEFAULT) - { - dataOffset += FIELD_HEADER_SIZE + 1; - fieldCount++; - } - - // Estimate INDEX_RANGE field. - dataOffset += FIELD_HEADER_SIZE; - filterSize++; // num filters - filterSize += statement.filter.EstimateSize(); - dataOffset += filterSize; - fieldCount++; - - if (!isNew) - { - // Query bin names are specified as a field (Scan bin names are specified later as operations) - // in old servers. Estimate size for selected bin names. 
- if (statement.binNames != null && statement.binNames.Length > 0) - { - dataOffset += FIELD_HEADER_SIZE; - binNameSize++; // num bin names - - foreach (string binName in statement.binNames) - { - binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1; - } - dataOffset += binNameSize; - fieldCount++; - } - } - - packedCtx = statement.filter.PackedCtx; - - if (packedCtx != null) - { - dataOffset += FIELD_HEADER_SIZE + packedCtx.Length; - fieldCount++; - } - } - - // Estimate aggregation/background function size. - if (statement.functionName != null) - { - dataOffset += FIELD_HEADER_SIZE + 1; // udf type - dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE; - - if (statement.functionArgs.Length > 0) - { - functionArgBuffer = Packer.Pack(statement.functionArgs); - } - else - { - functionArgBuffer = new byte[0]; - } - dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length; - fieldCount += 4; - } - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - long maxRecords = 0; - int partsFullSize = 0; - int partsPartialDigestSize = 0; - int partsPartialBValSize = 0; - - if (nodePartitions != null) - { - partsFullSize = nodePartitions.partsFull.Count * 2; - partsPartialDigestSize = nodePartitions.partsPartial.Count * 20; - - if (statement.filter != null) - { - partsPartialBValSize = nodePartitions.partsPartial.Count * 8; - } - maxRecords = nodePartitions.recordMax; - } - - if (partsFullSize > 0) - { - dataOffset += partsFullSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialDigestSize > 0) - { - dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialBValSize > 0) - { - dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate max records field size. 
This field is used in new servers and not used - // (but harmless to add) in old servers. - if (maxRecords > 0) - { - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive. - int operationCount = 0; - - if (statement.operations != null) - { - // Estimate size for background operations. - if (!background) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query"); - } - - foreach (Operation operation in statement.operations) - { - if (!Operation.IsWrite(operation.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query"); - } - EstimateOperationSize(operation); - } - operationCount = statement.operations.Length; - } - else if (statement.binNames != null && (isNew || statement.filter == null)) - { - // Estimate size for selected bin names (query bin names already handled for old servers). - foreach (string binName in statement.binNames) - { - EstimateOperationSize(binName); - } - operationCount = statement.binNames.Length; - } - - SizeBuffer(); - - if (background) - { - WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount); - } - else - { - QueryPolicy qp = (QueryPolicy)policy; - int readAttr = Command.INFO1_READ; - int writeAttr = 0; - - if (!qp.includeBinData) - { - readAttr |= Command.INFO1_NOBINDATA; - } - - if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT) - { - readAttr |= Command.INFO1_SHORT_QUERY; - } - else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) - { - writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY; - } - - int infoAttr = (isNew || statement.filter == null) ? 
Command.INFO3_PARTITION_DONE : 0; - - WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount); - } - - if (statement.ns != null) - { - WriteField(statement.ns, FieldType.NAMESPACE); - } - - if (statement.setName != null) - { - WriteField(statement.setName, FieldType.TABLE); - } - - // Write records per second. - if (statement.recordsPerSecond > 0) - { - WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND); - } - - // Write socket idle timeout. - WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); - - // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); - - if (statement.filter != null) - { - IndexCollectionType type = statement.filter.CollectionType; - - if (type != IndexCollectionType.DEFAULT) - { - WriteFieldHeader(1, FieldType.INDEX_TYPE); - dataBuffer[dataOffset++] = (byte)type; - } - - WriteFieldHeader(filterSize, FieldType.INDEX_RANGE); - dataBuffer[dataOffset++] = (byte)1; - dataOffset = statement.filter.Write(dataBuffer, dataOffset); - - if (!isNew) - { - // Query bin names are specified as a field (Scan bin names are specified later as operations) - // in old servers. - if (statement.binNames != null && statement.binNames.Length > 0) - { - WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST); - dataBuffer[dataOffset++] = (byte)statement.binNames.Length; - - foreach (string binName in statement.binNames) - { - int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1); - dataBuffer[dataOffset] = (byte)len; - dataOffset += len + 1; - } - } - } - - if (packedCtx != null) - { - WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT); - Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length); - dataOffset += packedCtx.Length; - } - } - - if (statement.functionName != null) - { - WriteFieldHeader(1, FieldType.UDF_OP); - dataBuffer[dataOffset++] = background ? 
(byte)2 : (byte)1; - WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(statement.functionName, FieldType.UDF_FUNCTION); - WriteField(functionArgBuffer, FieldType.UDF_ARGLIST); - } - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - if (partsFullSize > 0) - { - WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsFull) - { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; - } - } - - if (partsPartialDigestSize > 0) - { - WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) - { - Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); - dataOffset += 20; - } - } - - if (partsPartialBValSize > 0) - { - WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) - { - ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); - dataOffset += 8; - } - } - - if (maxRecords > 0) - { - WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); - } - - if (statement.operations != null) - { - foreach (Operation operation in statement.operations) - { - WriteOperation(operation); - } - } - else if (statement.binNames != null && (isNew || statement.filter == null)) - { - foreach (string binName in statement.binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - End(); - } - - //-------------------------------------------------- - // Command Sizing - //-------------------------------------------------- - - private int EstimateKeySize(Policy policy, Key key) - { - int fieldCount = 0; - - if (key.ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (key.setName != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - dataOffset += key.digest.Length + 
FIELD_HEADER_SIZE; - fieldCount++; - - if (policy.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - fieldCount++; - } - return fieldCount; - } - - private int EstimateUdfSize(string packageName, string functionName, byte[] bytes) - { - dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE; - dataOffset += bytes.Length + FIELD_HEADER_SIZE; - return 3; - } - - private void EstimateOperationSize(Bin bin) - { - dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE; - dataOffset += bin.value.EstimateSize(); - } - - private void EstimateOperationSize(Operation operation) - { - dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; - dataOffset += operation.value.EstimateSize(); - } - - private void EstimateReadOperationSize(Operation operation) - { - if (Operation.IsWrite(operation.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); - } - dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; - dataOffset += operation.value.EstimateSize(); - } - - private void EstimateOperationSize(string binName) - { - dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE; - } - - private void EstimateOperationSize() - { - dataOffset += OPERATION_HEADER_SIZE; - } - - //-------------------------------------------------- - // Command Writes - //-------------------------------------------------- - - /// - /// Header write for write commands. - /// - private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount) - { - // Set flags. 
- int generation = 0; - int infoAttr = 0; - - switch (policy.recordExistsAction) - { - case RecordExistsAction.UPDATE: - break; - case RecordExistsAction.UPDATE_ONLY: - infoAttr |= Command.INFO3_UPDATE_ONLY; - break; - case RecordExistsAction.REPLACE: - infoAttr |= Command.INFO3_CREATE_OR_REPLACE; - break; - case RecordExistsAction.REPLACE_ONLY: - infoAttr |= Command.INFO3_REPLACE_ONLY; - break; - case RecordExistsAction.CREATE_ONLY: - writeAttr |= Command.INFO2_CREATE_ONLY; - break; - } - - switch (policy.generationPolicy) - { - case GenerationPolicy.NONE: - break; - case GenerationPolicy.EXPECT_GEN_EQUAL: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION; - break; - case GenerationPolicy.EXPECT_GEN_GT: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION_GT; - break; - } - - if (policy.commitLevel == CommitLevel.COMMIT_MASTER) - { - infoAttr |= Command.INFO3_COMMIT_MASTER; - } - - if (policy.durableDelete) - { - writeAttr |= Command.INFO2_DURABLE_DELETE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - dataBuffer[dataOffset++] = 0; // unused - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for operate command. 
- /// - private void WriteHeaderReadWrite - ( - WritePolicy policy, - OperateArgs args, - int fieldCount - ) - { - // Set flags. - int generation = 0; - int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent; - int readAttr = args.readAttr; - int writeAttr = args.writeAttr; - int infoAttr = 0; - int operationCount = args.operations.Length; - - switch (policy.recordExistsAction) - { - case RecordExistsAction.UPDATE: - break; - case RecordExistsAction.UPDATE_ONLY: - infoAttr |= Command.INFO3_UPDATE_ONLY; - break; - case RecordExistsAction.REPLACE: - infoAttr |= Command.INFO3_CREATE_OR_REPLACE; - break; - case RecordExistsAction.REPLACE_ONLY: - infoAttr |= Command.INFO3_REPLACE_ONLY; - break; - case RecordExistsAction.CREATE_ONLY: - writeAttr |= Command.INFO2_CREATE_ONLY; - break; - } - - switch (policy.generationPolicy) - { - case GenerationPolicy.NONE: - break; - case GenerationPolicy.EXPECT_GEN_EQUAL: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION; - break; - case GenerationPolicy.EXPECT_GEN_GT: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION_GT; - break; - } - - if (policy.commitLevel == CommitLevel.COMMIT_MASTER) - { - infoAttr |= Command.INFO3_COMMIT_MASTER; - } - - if (policy.durableDelete) - { - writeAttr |= Command.INFO2_DURABLE_DELETE; - } - - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. 
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - dataBuffer[dataOffset++] = 0; // unused - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for read commands. - /// - private void WriteHeaderRead - ( - Policy policy, - int timeout, - int readAttr, - int writeAttr, - int infoAttr, - int fieldCount, - int operationCount - ) - { - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
- dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - - for (int i = 0; i < 6; i++) - { - dataBuffer[dataOffset++] = 0; - } - dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for read header commands. - /// - private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount) - { - int infoAttr = 0; - - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
- dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)infoAttr; - - for (int i = 0; i < 6; i++) - { - dataBuffer[dataOffset++] = 0; - } - dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - private void WriteKey(Policy policy, Key key) - { - // Write key into dataBuffer. - if (key.ns != null) - { - WriteField(key.ns, FieldType.NAMESPACE); - } - - if (key.setName != null) - { - WriteField(key.setName, FieldType.TABLE); - } - - WriteField(key.digest, FieldType.DIGEST_RIPE); - - if (policy.sendKey) - { - WriteField(key.userKey, FieldType.KEY); - } - } - - private int WriteReadOnlyOperations(Operation[] ops, int readAttr) - { - bool readBin = false; - bool readHeader = false; - - foreach (Operation op in ops) - { - switch (op.type) - { - case Operation.Type.READ: - // Read all bins if no bin is specified. 
- if (op.binName == null) - { - readAttr |= Command.INFO1_GET_ALL; - } - readBin = true; - break; - - case Operation.Type.READ_HEADER: - readHeader = true; - break; - - default: - break; - } - WriteOperation(op); - } - - if (readHeader && !readBin) - { - readAttr |= Command.INFO1_NOBINDATA; - } - return readAttr; - } - - private void WriteOperation(Bin bin, Operation.Type operationType) - { - int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = (byte) bin.value.Type; - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength + valueLength; - } - - private void WriteOperation(Operation operation) - { - int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); - dataBuffer[dataOffset++] = (byte) operation.value.Type; - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength + valueLength; - } - - private void WriteOperation(string name, Operation.Type operationType) - { - int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - - ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = 
(byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength; - } - - private void WriteOperation(Operation.Type operationType) - { - ByteUtil.IntToBytes(4, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = 0; - dataBuffer[dataOffset++] = 0; - dataBuffer[dataOffset++] = 0; - } - - private void WriteField(Value value, int type) - { - int offset = dataOffset + FIELD_HEADER_SIZE; - dataBuffer[offset++] = (byte)value.Type; - int len = value.Write(dataBuffer, offset) + 1; - WriteFieldHeader(len, type); - dataOffset += len; - } - - private void WriteField(string str, int type) - { - int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE); - WriteFieldHeader(len, type); - dataOffset += len; - } - - private void WriteField(byte[] bytes, int type) - { - Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length); - WriteFieldHeader(bytes.Length, type); - dataOffset += bytes.Length; - } - - private void WriteField(int val, int type) - { - WriteFieldHeader(4, type); - dataOffset += ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); - } - - private void WriteField(ulong val, int type) - { - WriteFieldHeader(8, type); - dataOffset += ByteUtil.LongToBytes(val, dataBuffer, dataOffset); - } - - private void WriteFieldHeader(int size, int type) - { - dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); - dataBuffer[dataOffset++] = (byte)type; - } - - internal virtual void WriteExpHeader(int size) - { - WriteFieldHeader(size, FieldType.FILTER_EXP); - } - - private void Begin() - { - dataOffset = MSG_TOTAL_HEADER_SIZE; - } - - private bool SizeBuffer(Policy policy) - { - if (policy.compress && dataOffset > COMPRESS_THRESHOLD) - { - // Command will be compressed. First, write uncompressed command - // into separate dataBuffer. Save normal dataBuffer for compressed command. 
- // Normal dataBuffer in async mode is from dataBuffer pool that is used to - // minimize memory pinning during socket operations. - dataBuffer = new byte[dataOffset]; - dataOffset = 0; - return true; - } - else - { - // Command will be uncompressed. - SizeBuffer(); - return false; - } - } - - private void End(bool compress) - { - if (!compress) - { - End(); - return; - } - - // Write proto header. - ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); - ByteUtil.LongToBytes(size, dataBuffer, 0); - - byte[] srcBuf = dataBuffer; - int srcSize = dataOffset; - - // Increase requested dataBuffer size in case compressed dataBuffer size is - // greater than the uncompressed dataBuffer size. - dataOffset += 16 + 100; - - // This method finds dataBuffer of requested size, resets dataOffset to segment offset - // and returns dataBuffer max size; - int trgBufSize = SizeBuffer(); - - // Compress to target starting at new dataOffset plus new header. - int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16; - - ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48); - ByteUtil.LongToBytes(proto, dataBuffer, dataOffset); - ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8); - SetLength(trgSize); - } - - protected internal abstract int SizeBuffer(); - protected internal abstract void End(); - protected internal abstract void SetLength(int length); - - //-------------------------------------------------- - // Response Parsing - //-------------------------------------------------- - - internal virtual void SkipKey(int fieldCount) - { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. 
- for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4 + fieldlen; - } - } - - internal virtual Key ParseKey(int fieldCount, out ulong bval) - { - byte[] digest = null; - string ns = null; - string setName = null; - Value userKey = null; - bval = 0; - - for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int fieldtype = dataBuffer[dataOffset++]; - int size = fieldlen - 1; - - switch (fieldtype) - { - case FieldType.DIGEST_RIPE: - digest = new byte[size]; - Array.Copy(dataBuffer, dataOffset, digest, 0, size); - break; - - case FieldType.NAMESPACE: - ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); - break; - - case FieldType.TABLE: - setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); - break; - - case FieldType.KEY: - int type = dataBuffer[dataOffset++]; - size--; - userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size); - break; - - case FieldType.BVAL_ARRAY: - bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset); - break; - } - dataOffset += size; - } - return new Key(ns, digest, setName, userKey); - } - - public static bool BatchInDoubt(bool isWrite, int commandSentCounter) - { - return isWrite && commandSentCounter > 1; - } - } -} -#pragma warning restore 0618 +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System.Collections; + +#pragma warning disable 0618 + +namespace Aerospike.Client +{ + public abstract class Command + { + public static readonly int INFO1_READ = (1 << 0); // Contains a read operation. + public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins. + public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query. + public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists. + public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins. + public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation. + public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress it's response. + + public static readonly int INFO2_WRITE = (1 << 0); // Create or update record + public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. + public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. + public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. + public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only). + public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. + public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relac read consistency + public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. + + public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message. + public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success. + // On send: Do not return partition done in scan/query. 
+ // On receive: Specified partition is done in scan/query. + public static readonly int INFO3_PARTITION_DONE = (1 << 2); + public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins. + public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record. + public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only. + public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below. + public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below. + + // Interpret SC_READ bits in info3. + // + // RELAX TYPE + // strict + // ------ + // 0 0 sequential (default) + // 0 1 linearize + // + // relaxed + // ------- + // 1 0 allow replica + // 1 1 allow unavailable + + public static readonly int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified. + public static readonly int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT. + public static readonly int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT. 
+ + public const byte STATE_READ_AUTH_HEADER = 1; + public const byte STATE_READ_HEADER = 2; + public const byte STATE_READ_DETAIL = 3; + public const byte STATE_COMPLETE = 4; + + public const byte BATCH_MSG_READ = 0x0; + public const byte BATCH_MSG_REPEAT = 0x1; + public const byte BATCH_MSG_INFO = 0x2; + public const byte BATCH_MSG_GEN = 0x4; + public const byte BATCH_MSG_TTL = 0x8; + public const byte BATCH_MSG_INFO4 = 0x10; + + public const int MSG_TOTAL_HEADER_SIZE = 30; + public const int FIELD_HEADER_SIZE = 5; + public const int OPERATION_HEADER_SIZE = 8; + public const int MSG_REMAINING_HEADER_SIZE = 22; + public const int COMPRESS_THRESHOLD = 128; + public const ulong CL_MSG_VERSION = 2UL; + public const ulong AS_MSG_TYPE = 3UL; + public const ulong MSG_TYPE_COMPRESSED = 4UL; + + internal byte[] dataBuffer; + internal int dataOffset; + internal readonly int maxRetries; + internal readonly int serverTimeout; + internal int socketTimeout; + internal int totalTimeout; + internal long? Version; + + protected int resultCode; + protected int generation; + protected int expiration; + protected int fieldCount; + protected int opCount; + + public Command(int socketTimeout, int totalTimeout, int maxRetries) + { + this.maxRetries = maxRetries; + this.totalTimeout = totalTimeout; + + if (totalTimeout > 0) + { + this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? 
socketTimeout : totalTimeout; + this.serverTimeout = this.socketTimeout; + } + else + { + this.socketTimeout = socketTimeout; + this.serverTimeout = 0; + } + + resultCode = 0; + generation = 0; + expiration = 0; + fieldCount = 0; + opCount = 0; + } + + //-------------------------------------------------- + // Multi-record Transactions + //-------------------------------------------------- + + public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) + { + Begin(); + int fieldCount = EstimateKeySize(key); + dataOffset += args.size; + + bool compress = SizeBuffer(policy); + + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)args.readAttr; + dataBuffer[dataOffset++] = (byte)args.writeAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)args.operations.Length, dataBuffer, dataOffset); + + WriteKey(key); + + foreach (Operation operation in args.operations) + { + WriteOperation(operation); + } + + End(compress); + } + + public void SetTxnVerify(Key key, long ver) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + // Version field. 
+ dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteFieldVersion(ver); + End(); + } + + public void SetBatchTxnVerify( + BatchPolicy policy, + Key[] keys, + long?[] versions, + BatchNode batch + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTxnVerify(policy, keys, versions, offsets); + } + + public void SetBatchTxnVerify( + BatchPolicy policy, + Key[] keys, + long?[] versions, + BatchOffsets offsets + ) + { + // Estimate buffer size. + Begin(); + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + int max = offsets.Size(); + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (ver.HasValue) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, 1); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4); + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + + int fieldCount = 0; + + if (ver.HasValue) + { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, 0); + + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } + + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + public void SetTxnMarkRollForward(Key key) + { + Bin bin = new("fwd", true); + + Begin(); + int fieldCount = EstimateKeySize(key); + EstimateOperationSize(bin); + //SizeBuffer(); + WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); + WriteOperation(bin, Operation.Type.WRITE); + End(); + } + + public void SetTxnRoll(Key key, Txn txn, int txnAttr) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + fieldCount += SizeTxn(key, txn, false); + + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)txnAttr; + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteTxn(txn, false); + End(); + } + + public void SetBatchTxnRoll( + BatchPolicy policy, + Txn txn, + Key[] keys, + BatchNode batch, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTxnRoll(policy, txn, keys, attr, offsets); + } + + public void SetBatchTxnRoll( + BatchPolicy policy, + Txn txn, + Key[] keys, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate buffer size. 
+ Begin(); + int fieldCount = 1; + int max = offsets.Size(); + long?[] versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = txn.GetReadVersion(key); + } + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, attr.hasWrite); + dataOffset += 2; // gen(2) = 2 + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[i]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
+ WriteBatchWrite(key, txn, ver, attr, null, 0, 0); + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + public void SetTxnClose(Txn txn, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(key); + //SizeBuffer(); + WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, + fieldCount, 0); + End(); + } + + private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) + { + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + + WriteKey(key); + } + + //-------------------------------------------------- + // Writes + //-------------------------------------------------- + + public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + foreach (Bin bin in bins) + { + EstimateOperationSize(bin); + } + + bool compress = SizeBuffer(policy); + + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + + foreach (Bin bin in bins) + { + WriteOperation(bin, operation); + } + 
End(compress); + } + + public virtual void SetDelete(WritePolicy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + End(); + } + + public void SetDelete(Policy policy, Key key, BatchAttr attr) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + SizeBuffer(); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); + End(); + } + + public virtual void SetTouch(WritePolicy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + EstimateOperationSize(); + SizeBuffer(); + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + WriteOperation(Operation.Type.TOUCH); + End(); + } + + //-------------------------------------------------- + // Reads + //-------------------------------------------------- + + public virtual void SetExists(Policy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + End(); + } + + public virtual void SetRead(Policy policy, Key key, string[] binNames) + { + int readAttr = Command.INFO1_READ; + int opCount = 0; + + if (binNames != null && binNames.Length > 0) + { + opCount = binNames.Length; + } + else + { + readAttr |= Command.INFO1_GET_ALL; + } + + Begin(); 
+ int fieldCount = EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + if (opCount != 0) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + + SizeBuffer(); + WriteHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + + if (opCount != 0) + { + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + public void SetRead(Policy policy, BatchRead br) + { + Begin(); + + BatchReadPolicy rp = br.policy; + BatchAttr attr = new(); + Expression exp; + int opCount; + + if (rp != null) + { + attr.SetRead(rp); + exp = rp.filterExp ?? policy.filterExp; + } + else + { + attr.SetRead(policy); + exp = policy.filterExp; + } + + if (br.binNames != null) + { + opCount = br.binNames.Length; + + foreach (string binName in br.binNames) + { + EstimateOperationSize(binName); + } + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + opCount = br.ops.Length; + + foreach (Operation op in br.ops) + { + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); + } + } + else + { + attr.AdjustRead(br.readAllBins); + opCount = 0; + } + + int fieldCount = EstimateKeyAttrSize(policy, br.key, attr, exp); + + SizeBuffer(); + WriteKeyAttr(policy, br.key, attr, exp, fieldCount, opCount); + + if (br.binNames != null) + { + foreach (string binName in br.binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (br.ops != null) + { + foreach (Operation op in br.ops) + { + WriteOperation(op); + } + } + End(); + } + + public void SetRead(Policy policy, Key key, Operation[] ops) + { + Begin(); + + BatchAttr attr = new(); + attr.SetRead(policy); + attr.AdjustRead(ops); + + int fieldCount = 
EstimateKeyAttrSize(policy, key, attr, policy.filterExp); + + foreach (Operation op in ops) + { + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); + } + + SizeBuffer(); + WriteKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(); + } + + public virtual void SetReadHeader(Policy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + End(); + } + + //-------------------------------------------------- + // Operate + //-------------------------------------------------- + + public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, args.hasWrite); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + dataOffset += args.size; + + bool compress = SizeBuffer(policy); + + WriteHeaderReadWrite(policy, args, fieldCount); + WriteKey(policy, key, args.hasWrite); + + policy.filterExp?.Write(this); + + foreach (Operation operation in args.operations) + { + WriteOperation(operation); + } + End(compress); + } + + public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + + dataOffset += attr.opSize; + bool compress = SizeBuffer(policy); + WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(compress); + } + + + 
//-------------------------------------------------- + // UDF + //-------------------------------------------------- + + public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + byte[] argBytes = Packer.Pack(args); + fieldCount += EstimateUdfSize(packageName, functionName, argBytes); + + bool compress = SizeBuffer(policy); + + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + End(compress); + } + + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, Value[] args) + { + byte[] argBytes = Packer.Pack(args); + SetUdf(policy, attr, key, packageName, functionName, argBytes); + } + + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, byte[] argBytes) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + fieldCount += EstimateUdfSize(packageName, functionName, argBytes); + + bool compress = SizeBuffer(policy); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + End(compress); + } + + //-------------------------------------------------- + // Batch Read Only + //-------------------------------------------------- + + public virtual void SetBatchRead(BatchPolicy policy, List records, BatchNode batch) + { + // Estimate full row size + int[] offsets = batch.offsets; + int max = batch.offsetsSize; + 
BatchRead prev = null; + + Begin(); + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + for (int i = 0; i < max; i++) + { + BatchRead record = records[offsets[i]]; + Key key = record.key; + string[] binNames = record.binNames; + Operation[] ops = record.ops; + + dataOffset += key.digest.Length + 4; + + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && + prev.binNames == binNames && prev.readAllBins == record.readAllBins && + prev.ops == ops) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. 
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + EstimateReadOperationSize(op); + } + } + prev = record; + } + } + + bool compress = SizeBuffer(policy); + + int readAttr = Command.INFO1_READ; + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; + prev = null; + + for (int i = 0; i < max; i++) + { + int index = offsets[i]; + ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + dataOffset += 4; + + BatchRead record = records[index]; + Key key = record.key; + string[] binNames = record.binNames; + Operation[] ops = record.ops; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && + prev.binNames == binNames && prev.readAllBins == record.readAllBins && + prev.ops == ops) + { + // Can set repeat previous namespace/bin names to save space. 
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full header, namespace and bin names. + dataBuffer[dataOffset++] = BATCH_MSG_READ; + + if (binNames != null && binNames.Length != 0) + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, binNames.Length); + + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (ops != null) + { + int offset = dataOffset++; + WriteBatchFields(key, 0, ops.Length); + dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); + } + else + { + dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA)); + WriteBatchFields(key, 0, 0); + } + prev = record; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + public virtual void SetBatchRead + ( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + string[] binNames, + Operation[] ops, + int readAttr + ) + { + // Estimate full row size + int[] offsets = batch.offsets; + int max = batch.offsetsSize; + + // Estimate dataBuffer size. + Begin(); + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + dataOffset += FIELD_HEADER_SIZE + 5; + + Key prev = null; + + for (int i = 0; i < max; i++) + { + Key key = keys[offsets[i]]; + + dataOffset += key.digest.Length + 4; + + // Try reference equality in hope that namespace for all keys is set from a fixed variable. + if (prev != null && prev.ns == key.ns && prev.setName == key.setName) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. 
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (binNames != null) + { + foreach (String binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + EstimateReadOperationSize(op); + } + } + prev = key; + } + } + + bool compress = SizeBuffer(policy); + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; + prev = null; + + for (int i = 0; i < max; i++) + { + int index = offsets[i]; + ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + dataOffset += 4; + + Key key = keys[index]; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + // Try reference equality in hope that namespace for all keys is set from a fixed variable. + if (prev != null && prev.ns == key.ns && prev.setName == key.setName) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full header, namespace and bin names. 
+ dataBuffer[dataOffset++] = BATCH_MSG_READ; + + if (binNames != null && binNames.Length != 0) + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, binNames.Length); + + foreach (String binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (ops != null) + { + int offset = dataOffset++; + WriteBatchFields(key, 0, ops.Length); + dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); + } + else + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, 0); + } + prev = key; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + //-------------------------------------------------- + // Batch Read/Write Operations + //-------------------------------------------------- + + public virtual void SetBatchOperate( + BatchPolicy policy, + IList records, + BatchNode batch) + { + BatchOffsetsNative offsets = new(batch); + SetBatchOperate(policy, records, offsets); + } + + public void SetBatchOperate( + BatchPolicy policy, + IList records, + BatchOffsets offsets) + { + Begin(); + int max = offsets.Size(); + Txn txn = policy.Txn; + long?[] versions = null; + + if (txn != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + versions[i] = txn.GetReadVersion(record.key); + } + } + + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + BatchRecord prev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + Key key = record.key; + long? 
ver = versions?[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(policy, key, record, prev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. + dataOffset += 12; + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, record.hasWrite); + dataOffset += record.Size(policy); + prev = record; + verPrev = ver; + } + } + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + + BatchAttr attr = new(); + prev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + long? ver = versions?[i]; + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + Key key = record.key; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(policy, key, record, prev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
+ switch (record.GetBatchType()) + { + case BatchRecord.Type.BATCH_READ: + { + BatchRead br = (BatchRead)record; + + if (br.policy != null) + { + attr.SetRead(br.policy); + } + else + { + attr.SetRead(policy); + } + + if (br.binNames != null) + { + if (br.binNames.Length > 0) + { + WriteBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp); + } + else + { + attr.AdjustRead(true); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); + } + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + WriteBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp); + } + else + { + attr.AdjustRead(br.readAllBins); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); + } + break; + } + + case BatchRecord.Type.BATCH_WRITE: + { + BatchWrite bw = (BatchWrite)record; + + if (bw.policy != null) + { + attr.SetWrite(bw.policy); + } + else + { + attr.SetWrite(policy); + } + attr.AdjustWrite(bw.ops); + WriteBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp); + break; + } + + case BatchRecord.Type.BATCH_UDF: + { + BatchUDF bu = (BatchUDF)record; + + if (bu.policy != null) + { + attr.SetUDF(bu.policy); + } + else + { + attr.SetUDF(policy); + } + WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0); + WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(bu.functionName, FieldType.UDF_FUNCTION); + WriteField(bu.argBytes, FieldType.UDF_ARGLIST); + break; + } + + case BatchRecord.Type.BATCH_DELETE: + { + BatchDelete bd = (BatchDelete)record; + + if (bd.policy != null) + { + attr.SetDelete(bd.policy); + } + else + { + attr.SetDelete(policy); + } + WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0); + break; + } + } + prev = record; + verPrev = ver; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + public virtual void SetBatchOperate + ( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + string[] binNames, + Operation[] ops, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchOperate(policy, keys, binNames, ops, attr, offsets); + } + + public void SetBatchOperate( + BatchPolicy policy, + Key[] keys, + string[] binNames, + Operation[] ops, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate full row size + int max = offsets.Size(); + Txn txn = policy.Txn; + long?[] versions = null; + + Begin(); + + if (txn != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = txn.GetReadVersion(key); + } + } + + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = 1; + + if (exp != null) + { + dataOffset += exp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, attr.hasWrite); + + if (attr.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + } + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + if (Operation.IsWrite(op.type)) + { + if (!attr.hasWrite) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); + } + dataOffset += 2; // Extra write specific fields. + } + EstimateOperationSize(op); + } + } + else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) + { + dataOffset += 2; // Extra write specific fields. + } + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + exp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
if (binNames != null)
{
    WriteBatchBinNames(key, txn, ver, binNames, attr, null);
}
else if (ops != null)
{
    WriteBatchOperations(key, txn, ver, ops, attr, null);
}
else if ((attr.writeAttr & Command.INFO2_DELETE) != 0)
{
    WriteBatchWrite(key, txn, ver, attr, null, 0, 0);
}
else
{
    WriteBatchRead(key, txn, ver, attr, null, 0);
}
keyPrev = key;
verPrev = ver;
}
}

// Write real field size.
// Parenthesize before the cast so the subtraction is performed in signed
// arithmetic, consistent with the other SetBatch* methods in this class.
ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
End(compress);
}

/// <summary>
/// Set batch UDF commands for a single batch node. Convenience overload that
/// converts the BatchNode into offsets and delegates to the offsets-based overload.
/// </summary>
public virtual void SetBatchUDF(
    BatchPolicy policy,
    Key[] keys,
    BatchNode batch,
    string packageName,
    string functionName,
    byte[] argBytes,
    BatchAttr attr
)
{
    BatchOffsetsNative offsets = new(batch);
    SetBatchUDF(policy, keys, packageName, functionName, argBytes, attr, offsets);
}

/// <summary>
/// Build the wire command for a batch of UDF executions. Two passes: first
/// estimate the buffer size (using the repeat marker to compress adjacent keys
/// sharing namespace/set/version), then write the message into dataBuffer.
/// </summary>
public virtual void SetBatchUDF
(
    BatchPolicy policy,
    Key[] keys,
    string packageName,
    string functionName,
    byte[] argBytes,
    BatchAttr attr,
    BatchOffsets offsets
)
{
    // Estimate buffer size.
    Begin();
    int max = offsets.Size();
    Txn txn = policy.Txn;
    long?[] versions = null;

    // Under a transaction, collect the previously read version for each key so
    // it can be sent for version verification.
    if (txn != null)
    {
        versions = new long?[max];

        for (int i = 0; i < max; i++)
        {
            int offset = offsets.Get(i);
            Key key = keys[offset];
            versions[i] = txn.GetReadVersion(key);
        }
    }

    Expression exp = GetBatchExpression(policy, attr);
    int fieldCount = 1;

    if (exp != null)
    {
        dataOffset += exp.Size();
        fieldCount++;
    }

    dataOffset += FIELD_HEADER_SIZE + 5;

    Key keyPrev = null;
    long? verPrev = null;

    for (int i = 0; i < max; i++)
    {
        int offset = offsets.Get(i);
        Key key = keys[offset];
        long? ver = versions?[i];

        dataOffset += key.digest.Length + 4;

        if (CanRepeat(attr, key, keyPrev, ver, verPrev))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else
        {
            // Write full header and namespace/set/bin names.
            dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
            dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
            dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            SizeTxnBatch(txn, ver, attr.hasWrite);

            if (attr.sendKey)
            {
                dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
            }
            dataOffset += 2; // gen(2) = 2
            EstimateUdfSize(packageName, functionName, argBytes);
            keyPrev = key;
            verPrev = ver;
        }
    }

    bool compress = SizeBuffer(policy);

    // Write phase.
    WriteBatchHeader(policy, totalTimeout, fieldCount);

    exp?.Write(this);

    int fieldSizeOffset = dataOffset;
    WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end

    ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = GetBatchFlags(policy);
    keyPrev = null;
    verPrev = null;

    for (int i = 0; i < max; i++)
    {
        int offset = offsets.Get(i);
        Key key = keys[offset];
        long? ver = versions?[i];

        ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
        dataOffset += 4;

        byte[] digest = key.digest;
        Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
        dataOffset += digest.Length;

        if (CanRepeat(attr, key, keyPrev, ver, verPrev))
        {
            // Can set repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
        }
        else
        {
            // Write full message.
            WriteBatchWrite(key, txn, ver, attr, null, 3, 0);
            WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
            WriteField(functionName, FieldType.UDF_FUNCTION);
            WriteField(argBytes, FieldType.UDF_ARGLIST);
            keyPrev = key;
            verPrev = ver;
        }
    }

    // Write real field size.
    // Parenthesized cast for consistency with the other SetBatch* methods.
    ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
    End(compress);
}

private static bool CanRepeat(
    Policy policy,
    Key key,
    BatchRecord record,
    BatchRecord prev,
    long? ver,
    long?
verPrev
)
{
    // Full equality checks are comparatively expensive, so rely on cheap string
    // equality of namespace/set names (commonly assigned from shared variables)
    // plus the record comparison. A false negative merely costs a little extra
    // space in the buffer; the batch is still correct. The same reasoning
    // applies to the version comparison.
    return !policy.sendKey && verPrev == ver && prev != null && prev.key.ns == key.ns &&
        prev.key.setName == key.setName && record.Equals(prev);
}

// Repeat-detection for key-array batches: previous entry must share namespace,
// set name and read version, and the key must not be sent explicitly.
private static bool CanRepeat(BatchAttr attr, Key key, Key keyPrev, long? ver, long? verPrev)
{
    return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.ns == key.ns &&
        keyPrev.setName == key.setName;
}

// Repeat-detection variant used when no per-record attributes are involved.
private static bool CanRepeat(Key key, Key keyPrev, long? ver, long? verPrev)
{
    return verPrev == ver && keyPrev != null && keyPrev.ns == key.ns &&
        keyPrev.setName == key.setName;
}

// Per-record filter expression takes precedence over the policy-level one.
private static Expression GetBatchExpression(Policy policy, BatchAttr attr)
{
    return attr.filterExp ?? policy.filterExp;
}

// Build the batch flags byte. Bit 0x8 is always set; the remaining bits mirror
// the corresponding batch policy options.
private static byte GetBatchFlags(BatchPolicy policy)
{
    byte batchFlags = 0x8;

    if (policy.allowInline)
    {
        batchFlags |= 0x1;
    }

    if (policy.allowInlineSSD)
    {
        batchFlags |= 0x2;
    }

    if (policy.respondAllKeys)
    {
        batchFlags |= 0x4;
    }
    return batchFlags;
}

// Add the estimated size of the per-record transaction fields (info4 byte,
// transaction id, optional read version, optional deadline) to dataOffset.
// No-op when there is no transaction.
private void SizeTxnBatch(Txn txn, long? ver, bool hasWrite)
{
    if (txn == null)
    {
        return;
    }

    dataOffset++; // Add info4 byte for MRT.
    dataOffset += 8 + FIELD_HEADER_SIZE;

    if (ver.HasValue)
    {
        dataOffset += 7 + FIELD_HEADER_SIZE;
    }

    if (hasWrite && txn.Deadline != 0)
    {
        dataOffset += 4 + FIELD_HEADER_SIZE;
    }
}

// Write the fixed batch command header: 8 reserved bytes for the proto header,
// then header length, attribute bytes, 10 zeroed bytes, timeout, field count
// and a zero operation count. Total size is filled in later.
private void WriteBatchHeader(Policy policy, int timeout, int fieldCount)
{
    int readAttr = policy.compress
        ? Command.INFO1_BATCH | Command.INFO1_COMPRESS_RESPONSE
        : Command.INFO1_BATCH;

    // Write all header data except total size which must be written last.
    dataOffset += 8;
    dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
    dataBuffer[dataOffset++] = (byte)readAttr;
    dataBuffer[dataOffset++] = (byte)0;
    dataBuffer[dataOffset++] = (byte)0;

    Array.Clear(dataBuffer, dataOffset, 10);
    dataOffset += 10;

    dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
}

// Write a batch read sub-command followed by one READ operation per bin name.
private void WriteBatchBinNames(Key key, Txn txn, long? ver, string[] binNames, BatchAttr attr, Expression filter)
{
    WriteBatchRead(key, txn, ver, attr, filter, binNames.Length);

    foreach (string binName in binNames)
    {
        WriteOperation(binName, Operation.Type.READ);
    }
}

// Write a batch read or write sub-command (depending on attr.hasWrite)
// followed by the supplied operations.
private void WriteBatchOperations(Key key, Txn txn, long? ver, Operation[] ops, BatchAttr attr, Expression filter)
{
    if (attr.hasWrite)
    {
        WriteBatchWrite(key, txn, ver, attr, filter, 0, ops.Length);
    }
    else
    {
        WriteBatchRead(key, txn, ver, attr, filter, ops.Length);
    }

    foreach (Operation op in ops)
    {
        WriteOperation(op);
    }
}

private void WriteBatchRead(Key key, Txn txn, long?
ver, BatchAttr attr, Expression filter, int opCount)
{
    if (txn != null)
    {
        dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL);
        dataBuffer[dataOffset++] = (byte)attr.readAttr;
        dataBuffer[dataOffset++] = (byte)attr.writeAttr;
        dataBuffer[dataOffset++] = (byte)attr.infoAttr;
        dataBuffer[dataOffset++] = (byte)attr.txnAttr;
        ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
        dataOffset += 4;
        WriteBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount);
    }
    else
    {
        dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL);
        dataBuffer[dataOffset++] = (byte)attr.readAttr;
        dataBuffer[dataOffset++] = (byte)attr.writeAttr;
        dataBuffer[dataOffset++] = (byte)attr.infoAttr;
        ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
        dataOffset += 4;
        WriteBatchFieldsReg(key, attr, filter, 0, opCount);
    }
}

/// <summary>
/// Write the per-record header of a batch write sub-command (info bytes,
/// generation, TTL) followed by its fields. The transaction branch adds the
/// txnAttr info4 byte and the transaction fields.
/// </summary>
private void WriteBatchWrite(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount)
{
    if (txn != null)
    {
        dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL);
        dataBuffer[dataOffset++] = (byte)attr.readAttr;
        dataBuffer[dataOffset++] = (byte)attr.writeAttr;
        dataBuffer[dataOffset++] = (byte)attr.infoAttr;
        dataBuffer[dataOffset++] = (byte)attr.txnAttr;
        ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset);
        dataOffset += 2;
        ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
        dataOffset += 4;
        WriteBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount);
    }
    else
    {
        dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL);
        dataBuffer[dataOffset++] = (byte)attr.readAttr;
        dataBuffer[dataOffset++] = (byte)attr.writeAttr;
        dataBuffer[dataOffset++] = (byte)attr.infoAttr;
        ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset);
        dataOffset += 2;
        // BUG FIX: expiration was cast to (ushort), truncating TTL values >= 65536
        // seconds and zeroing the upper 16 bits of the 4-byte TTL field. Cast to
        // (uint) like every other expiration write in this class.
        ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
        dataOffset += 4;
        WriteBatchFieldsReg(key, attr, filter, fieldCount, opCount);
    }
}

/// <summary>
/// Write the fields of a batch sub-command under a transaction: transaction id,
/// optional read version, optional deadline (writes only), optional filter
/// expression and optional user key, preceded by the namespace/set fields.
/// fieldCount is the caller's count of extra fields still to be written.
/// </summary>
private void WriteBatchFieldsTxn(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount)
{
    fieldCount++;

    if (ver.HasValue)
    {
        fieldCount++;
    }

    if (attr.hasWrite && txn.Deadline != 0)
    {
        fieldCount++;
    }

    if (filter != null)
    {
        fieldCount++;
    }

    if (attr.sendKey)
    {
        fieldCount++;
    }

    WriteBatchFields(key, fieldCount, opCount);

    WriteFieldLE(txn.Id, FieldType.MRT_ID);

    if (ver.HasValue)
    {
        WriteFieldVersion(ver.Value);
    }

    if (attr.hasWrite && txn.Deadline != 0)
    {
        WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE);
    }

    filter?.Write(this);

    if (attr.sendKey)
    {
        WriteField(key.userKey, FieldType.KEY);
    }
}

/// <summary>
/// Write the fields of a non-transaction batch sub-command: optional filter
/// expression and optional user key, preceded by the namespace/set fields.
/// </summary>
private void WriteBatchFieldsReg(
    Key key,
    BatchAttr attr,
    Expression filter,
    int fieldCount,
    int opCount
)
{
    if (filter != null)
    {
        fieldCount++;
    }

    if (attr.sendKey)
    {
        fieldCount++;
    }

    WriteBatchFields(key, fieldCount, opCount);

    filter?.Write(this);

    if (attr.sendKey)
    {
        WriteField(key.userKey, FieldType.KEY);
    }
}

/// <summary>
/// Write field count (+2 for namespace and set), operation count, then the
/// namespace and set name fields.
/// </summary>
private void WriteBatchFields(Key key, int fieldCount, int opCount)
{
    fieldCount += 2;
    ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
    dataOffset += 2;
    ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset);
    dataOffset += 2;
    WriteField(key.ns, FieldType.NAMESPACE);
    WriteField(key.setName, FieldType.TABLE);
}

//--------------------------------------------------
// Scan
//--------------------------------------------------

public virtual void SetScan
(
    Cluster cluster,
    ScanPolicy policy,
    string ns,
    string setName,
    string[] binNames,
    ulong taskId,
    NodePartitions nodePartitions
)
{
    Begin();
    int fieldCount = 0;
    int partsFullSize = nodePartitions.partsFull.Count
* 2; + int partsPartialSize = nodePartitions.partsPartial.Count * 20; + long maxRecords = nodePartitions.recordMax; + + if (ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (setName != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsFullSize > 0) + { + dataOffset += partsFullSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialSize > 0) + { + dataOffset += partsPartialSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (maxRecords > 0) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (policy.recordsPerSecond > 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + // Estimate scan timeout size. + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + + // Estimate taskId size. + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + + SizeBuffer(); + int readAttr = Command.INFO1_READ; + + if (!policy.includeBinData) + { + readAttr |= Command.INFO1_NOBINDATA; + } + + // Clusters that support partition queries also support not sending partition done messages. + int operationCount = (binNames == null) ? 
0 : binNames.Length; + WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount); + + if (ns != null) + { + WriteField(ns, FieldType.NAMESPACE); + } + + if (setName != null) + { + WriteField(setName, FieldType.TABLE); + } + + if (partsFullSize > 0) + { + WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsFull) + { + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; + } + } + + if (partsPartialSize > 0) + { + WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); + dataOffset += 20; + } + } + + if (maxRecords > 0) + { + WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); + } + + if (policy.recordsPerSecond > 0) + { + WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND); + } + + policy.filterExp?.Write(this); + + // Write scan timeout + WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); + + // Write taskId field + WriteField(taskId, FieldType.QUERY_ID); + + if (binNames != null) + { + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + //-------------------------------------------------- + // Query + //-------------------------------------------------- + + protected virtual internal void SetQuery + ( + Cluster cluster, + Policy policy, + Statement statement, + ulong taskId, + bool background, + NodePartitions nodePartitions + ) + { + byte[] functionArgBuffer = null; + int fieldCount = 0; + int filterSize = 0; + int binNameSize = 0; + bool isNew = cluster.hasPartitionQuery; + + Begin(); + + if (statement.ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (statement.setName != null) + { + dataOffset += 
ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate recordsPerSecond field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + if (statement.recordsPerSecond > 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate socket timeout field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + + // Estimate taskId field. + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + byte[] packedCtx = null; + + if (statement.filter != null) + { + IndexCollectionType type = statement.filter.CollectionType; + + // Estimate INDEX_TYPE field. + if (type != IndexCollectionType.DEFAULT) + { + dataOffset += FIELD_HEADER_SIZE + 1; + fieldCount++; + } + + // Estimate INDEX_RANGE field. + dataOffset += FIELD_HEADER_SIZE; + filterSize++; // num filters + filterSize += statement.filter.EstimateSize(); + dataOffset += filterSize; + fieldCount++; + + if (!isNew) + { + // Query bin names are specified as a field (Scan bin names are specified later as operations) + // in old servers. Estimate size for selected bin names. + if (statement.binNames != null && statement.binNames.Length > 0) + { + dataOffset += FIELD_HEADER_SIZE; + binNameSize++; // num bin names + + foreach (string binName in statement.binNames) + { + binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1; + } + dataOffset += binNameSize; + fieldCount++; + } + } + + packedCtx = statement.filter.PackedCtx; + + if (packedCtx != null) + { + dataOffset += FIELD_HEADER_SIZE + packedCtx.Length; + fieldCount++; + } + } + + // Estimate aggregation/background function size. 
+ if (statement.functionName != null) + { + dataOffset += FIELD_HEADER_SIZE + 1; // udf type + dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE; + + if (statement.functionArgs.Length > 0) + { + functionArgBuffer = Packer.Pack(statement.functionArgs); + } + else + { + functionArgBuffer = Array.Empty(); + } + dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length; + fieldCount += 4; + } + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + long maxRecords = 0; + int partsFullSize = 0; + int partsPartialDigestSize = 0; + int partsPartialBValSize = 0; + + if (nodePartitions != null) + { + partsFullSize = nodePartitions.partsFull.Count * 2; + partsPartialDigestSize = nodePartitions.partsPartial.Count * 20; + + if (statement.filter != null) + { + partsPartialBValSize = nodePartitions.partsPartial.Count * 8; + } + maxRecords = nodePartitions.recordMax; + } + + if (partsFullSize > 0) + { + dataOffset += partsFullSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialDigestSize > 0) + { + dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialBValSize > 0) + { + dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate max records field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + if (maxRecords > 0) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive. + int operationCount = 0; + + if (statement.operations != null) + { + // Estimate size for background operations. 
+ if (!background) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query"); + } + + foreach (Operation operation in statement.operations) + { + if (!Operation.IsWrite(operation.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query"); + } + EstimateOperationSize(operation); + } + operationCount = statement.operations.Length; + } + else if (statement.binNames != null && (isNew || statement.filter == null)) + { + // Estimate size for selected bin names (query bin names already handled for old servers). + foreach (string binName in statement.binNames) + { + EstimateOperationSize(binName); + } + operationCount = statement.binNames.Length; + } + + SizeBuffer(); + + if (background) + { + WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount); + } + else + { + QueryPolicy qp = (QueryPolicy)policy; + int readAttr = Command.INFO1_READ; + int writeAttr = 0; + + if (!qp.includeBinData) + { + readAttr |= Command.INFO1_NOBINDATA; + } + + if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT) + { + readAttr |= Command.INFO1_SHORT_QUERY; + } + else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) + { + writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY; + } + + int infoAttr = (isNew || statement.filter == null) ? Command.INFO3_PARTITION_DONE : 0; + + WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount); + } + + if (statement.ns != null) + { + WriteField(statement.ns, FieldType.NAMESPACE); + } + + if (statement.setName != null) + { + WriteField(statement.setName, FieldType.TABLE); + } + + // Write records per second. + if (statement.recordsPerSecond > 0) + { + WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND); + } + + // Write socket idle timeout. 
+ WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); + + // Write taskId field + WriteField(taskId, FieldType.QUERY_ID); + + if (statement.filter != null) + { + IndexCollectionType type = statement.filter.CollectionType; + + if (type != IndexCollectionType.DEFAULT) + { + WriteFieldHeader(1, FieldType.INDEX_TYPE); + dataBuffer[dataOffset++] = (byte)type; + } + + WriteFieldHeader(filterSize, FieldType.INDEX_RANGE); + dataBuffer[dataOffset++] = (byte)1; + dataOffset = statement.filter.Write(dataBuffer, dataOffset); + + if (!isNew) + { + // Query bin names are specified as a field (Scan bin names are specified later as operations) + // in old servers. + if (statement.binNames != null && statement.binNames.Length > 0) + { + WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST); + dataBuffer[dataOffset++] = (byte)statement.binNames.Length; + + foreach (string binName in statement.binNames) + { + int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1); + dataBuffer[dataOffset] = (byte)len; + dataOffset += len + 1; + } + } + } + + if (packedCtx != null) + { + WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT); + Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length); + dataOffset += packedCtx.Length; + } + } + + if (statement.functionName != null) + { + WriteFieldHeader(1, FieldType.UDF_OP); + dataBuffer[dataOffset++] = background ? 
(byte)2 : (byte)1; + WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(statement.functionName, FieldType.UDF_FUNCTION); + WriteField(functionArgBuffer, FieldType.UDF_ARGLIST); + } + + policy.filterExp?.Write(this); + + if (partsFullSize > 0) + { + WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsFull) + { + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; + } + } + + if (partsPartialDigestSize > 0) + { + WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); + dataOffset += 20; + } + } + + if (partsPartialBValSize > 0) + { + WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); + dataOffset += 8; + } + } + + if (maxRecords > 0) + { + WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); + } + + if (statement.operations != null) + { + foreach (Operation operation in statement.operations) + { + WriteOperation(operation); + } + } + else if (statement.binNames != null && (isNew || statement.filter == null)) + { + foreach (string binName in statement.binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + //-------------------------------------------------- + // Command Sizing + //-------------------------------------------------- + + private int EstimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) + { + int fieldCount = EstimateKeySize(policy, key, attr.hasWrite); + + if (filterExp != null) + { + dataOffset += filterExp.Size(); + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Policy policy, Key key, bool hasWrite) + { + int fieldCount = EstimateKeySize(key); + + fieldCount += 
SizeTxn(key, policy.Txn, hasWrite); + + if (policy.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Key key) + { + int fieldCount = 0; + + if (key.ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (key.setName != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + dataOffset += key.digest.Length + FIELD_HEADER_SIZE; + fieldCount++; + + return fieldCount; + } + + private int EstimateUdfSize(string packageName, string functionName, byte[] bytes) + { + dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE; + dataOffset += bytes.Length + FIELD_HEADER_SIZE; + return 3; + } + + private void EstimateOperationSize(Bin bin) + { + dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE; + dataOffset += bin.value.EstimateSize(); + } + + private void EstimateOperationSize(Operation operation) + { + dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; + dataOffset += operation.value.EstimateSize(); + } + + private void EstimateReadOperationSize(Operation operation) + { + if (Operation.IsWrite(operation.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); + } + dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; + dataOffset += operation.value.EstimateSize(); + } + + private void EstimateOperationSize(string binName) + { + dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE; + } + + private void EstimateOperationSize() + { + dataOffset += OPERATION_HEADER_SIZE; + } + + //-------------------------------------------------- + // Command Writes + //-------------------------------------------------- 
+ + /// + /// Header write for write commands. + /// + private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount) + { + // Set flags. + int generation = 0; + int infoAttr = 0; + + switch (policy.recordExistsAction) + { + case RecordExistsAction.UPDATE: + break; + case RecordExistsAction.UPDATE_ONLY: + infoAttr |= Command.INFO3_UPDATE_ONLY; + break; + case RecordExistsAction.REPLACE: + infoAttr |= Command.INFO3_CREATE_OR_REPLACE; + break; + case RecordExistsAction.REPLACE_ONLY: + infoAttr |= Command.INFO3_REPLACE_ONLY; + break; + case RecordExistsAction.CREATE_ONLY: + writeAttr |= Command.INFO2_CREATE_ONLY; + break; + } + + switch (policy.generationPolicy) + { + case GenerationPolicy.NONE: + break; + case GenerationPolicy.EXPECT_GEN_EQUAL: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION; + break; + case GenerationPolicy.EXPECT_GEN_GT: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION_GT; + break; + } + + if (policy.commitLevel == CommitLevel.COMMIT_MASTER) + { + infoAttr |= Command.INFO3_COMMIT_MASTER; + } + + if (policy.durableDelete) + { + writeAttr |= Command.INFO2_DURABLE_DELETE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
+ dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for operate command. + /// + private void WriteHeaderReadWrite + ( + WritePolicy policy, + OperateArgs args, + int fieldCount + ) + { + // Set flags. + int generation = 0; + int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent; + int readAttr = args.readAttr; + int writeAttr = args.writeAttr; + int infoAttr = 0; + int operationCount = args.operations.Length; + + switch (policy.recordExistsAction) + { + case RecordExistsAction.UPDATE: + break; + case RecordExistsAction.UPDATE_ONLY: + infoAttr |= Command.INFO3_UPDATE_ONLY; + break; + case RecordExistsAction.REPLACE: + infoAttr |= Command.INFO3_CREATE_OR_REPLACE; + break; + case RecordExistsAction.REPLACE_ONLY: + infoAttr |= Command.INFO3_REPLACE_ONLY; + break; + case RecordExistsAction.CREATE_ONLY: + writeAttr |= Command.INFO2_CREATE_ONLY; + break; + } + + switch (policy.generationPolicy) + { + case GenerationPolicy.NONE: + break; + case GenerationPolicy.EXPECT_GEN_EQUAL: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION; + break; + case GenerationPolicy.EXPECT_GEN_GT: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION_GT; + break; + } + + if (policy.commitLevel == CommitLevel.COMMIT_MASTER) + { + infoAttr |= Command.INFO3_COMMIT_MASTER; + } + + if (policy.durableDelete) + { + writeAttr |= 
Command.INFO2_DURABLE_DELETE; + } + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + if (policy.compress) + { + readAttr |= Command.INFO1_COMPRESS_RESPONSE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + dataBuffer[dataOffset++] = 0; // unused + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for read commands. 
+ /// + private void WriteHeaderRead + ( + Policy policy, + int timeout, + int readAttr, + int writeAttr, + int infoAttr, + int fieldCount, + int operationCount + ) + { + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + if (policy.compress) + { + readAttr |= Command.INFO1_COMPRESS_RESPONSE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + + for (int i = 0; i < 6; i++) + { + dataBuffer[dataOffset++] = 0; + } + dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for read header commands. 
+ /// + private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount) + { + int infoAttr = 0; + + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)infoAttr; + + for (int i = 0; i < 6; i++) + { + dataBuffer[dataOffset++] = 0; + } + dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for batch single commands. + /// + private void WriteKeyAttr( + Policy policy, + Key key, + BatchAttr attr, + Expression filterExp, + int fieldCount, + int operationCount + ) + { + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
+ dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = 0; // unused + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)attr.generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + + WriteKey(policy, key, attr.hasWrite); + + filterExp?.Write(this); + } + + private void WriteKey(Policy policy, Key key, bool sendDeadline) + { + WriteKey(key); + WriteTxn(policy.Txn, sendDeadline); + + if (policy.sendKey) + { + WriteField(key.userKey, FieldType.KEY); + } + } + + private void WriteKey(Key key) + { + // Write key into dataBuffer. + if (key.ns != null) + { + WriteField(key.ns, FieldType.NAMESPACE); + } + + if (key.setName != null) + { + WriteField(key.setName, FieldType.TABLE); + } + + WriteField(key.digest, FieldType.DIGEST_RIPE); + } + + private int WriteReadOnlyOperations(Operation[] ops, int readAttr) + { + bool readBin = false; + bool readHeader = false; + + foreach (Operation op in ops) + { + switch (op.type) + { + case Operation.Type.READ: + // Read all bins if no bin is specified. 
+ if (op.binName == null) + { + readAttr |= Command.INFO1_GET_ALL; + } + readBin = true; + break; + + case Operation.Type.READ_HEADER: + readHeader = true; + break; + + default: + break; + } + WriteOperation(op); + } + + if (readHeader && !readBin) + { + readAttr |= Command.INFO1_NOBINDATA; + } + return readAttr; + } + + private void WriteOperation(Bin bin, Operation.Type operationType) + { + int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); + + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = (byte)bin.value.Type; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength + valueLength; + } + + private void WriteOperation(Operation operation) + { + int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); + + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); + dataBuffer[dataOffset++] = (byte)operation.value.Type; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength + valueLength; + } + + private void WriteOperation(string name, Operation.Type operationType) + { + int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + + ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 
(byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength; + } + + private void WriteOperation(Operation.Type operationType) + { + ByteUtil.IntToBytes(4, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + } + + private int SizeTxn(Key key, Txn txn, bool hasWrite) + { + int fieldCount = 0; + + if (txn != null) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + Version = txn.GetReadVersion(key); + + if (Version.HasValue) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (hasWrite && txn.Deadline != 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + } + return fieldCount; + } + + private void WriteTxn(Txn txn, bool sendDeadline) + { + if (txn != null) + { + WriteFieldLE(txn.Id, FieldType.MRT_ID); + + if (Version.HasValue) + { + WriteFieldVersion(Version.Value); + } + + if (sendDeadline && txn.Deadline != 0) + { + WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); + } + } + } + + private void WriteFieldVersion(long ver) + { + WriteFieldHeader(7, FieldType.RECORD_VERSION); + ByteUtil.LongToVersionBytes(ver, dataBuffer, dataOffset); + dataOffset += 7; + } + + private void WriteField(Value value, int type) + { + int offset = dataOffset + FIELD_HEADER_SIZE; + dataBuffer[offset++] = (byte)value.Type; + int len = value.Write(dataBuffer, offset) + 1; + WriteFieldHeader(len, type); + dataOffset += len; + } + + private void WriteField(string str, int type) + { + int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE); + WriteFieldHeader(len, type); + dataOffset += len; + } + + private void WriteField(byte[] bytes, int type) + { + Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length); + WriteFieldHeader(bytes.Length, type); + dataOffset += bytes.Length; + } + + private void WriteField(int val, int 
type) + { + WriteFieldHeader(4, type); + ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; + } + + private void WriteFieldLE(int val, int type) + { + WriteFieldHeader(4, type); + ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; + } + + private void WriteField(ulong val, int type) + { + WriteFieldHeader(8, type); + ByteUtil.LongToBytes(val, dataBuffer, dataOffset); + dataOffset += 8; + } + + private void WriteFieldLE(long val, int type) + { + WriteFieldHeader(8, type); + ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); + dataOffset += 8; + } + + private void WriteFieldHeader(int size, int type) + { + ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (byte)type; + } + + internal virtual void WriteExpHeader(int size) + { + WriteFieldHeader(size, FieldType.FILTER_EXP); + } + + private void Begin() + { + dataOffset = MSG_TOTAL_HEADER_SIZE; + } + + private bool SizeBuffer(Policy policy) + { + if (policy.compress && dataOffset > COMPRESS_THRESHOLD) + { + // Command will be compressed. First, write uncompressed command + // into separate dataBuffer. Save normal dataBuffer for compressed command. + // Normal dataBuffer in async mode is from dataBuffer pool that is used to + // minimize memory pinning during socket operations. + dataBuffer = new byte[dataOffset]; + dataOffset = 0; + return true; + } + else + { + // Command will be uncompressed. + SizeBuffer(); + return false; + } + } + + private void End(bool compress) + { + if (!compress) + { + End(); + return; + } + + // Write proto header. + ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); + ByteUtil.LongToBytes(size, dataBuffer, 0); + + byte[] srcBuf = dataBuffer; + int srcSize = dataOffset; + + // Increase requested dataBuffer size in case compressed dataBuffer size is + // greater than the uncompressed dataBuffer size. 
+ dataOffset += 16 + 100; + + // This method finds dataBuffer of requested size, resets dataOffset to segment offset + // and returns dataBuffer max size; + int trgBufSize = SizeBuffer(); + + // Compress to target starting at new dataOffset plus new header. + int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16; + + ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48); + ByteUtil.LongToBytes(proto, dataBuffer, dataOffset); + ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8); + SetLength(trgSize); + } + + protected internal abstract int SizeBuffer(); + protected internal abstract void End(); + protected internal abstract void SetLength(int length); + + //-------------------------------------------------- + // Response Parsing + //-------------------------------------------------- + + internal virtual void SkipKey(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. 
+ for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + internal virtual Key ParseKey(int fieldCount, out ulong bval) + { + byte[] digest = null; + string ns = null; + string setName = null; + Value userKey = null; + bval = 0; + + for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int fieldtype = dataBuffer[dataOffset++]; + int size = fieldlen - 1; + + switch (fieldtype) + { + case FieldType.DIGEST_RIPE: + digest = new byte[size]; + Array.Copy(dataBuffer, dataOffset, digest, 0, size); + break; + + case FieldType.NAMESPACE: + ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); + break; + + case FieldType.TABLE: + setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); + break; + + case FieldType.KEY: + int type = dataBuffer[dataOffset++]; + size--; + userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size); + break; + + case FieldType.BVAL_ARRAY: + bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset); + break; + } + dataOffset += size; + } + return new Key(ns, digest, setName, userKey); + } + + public long? ParseVersion(int fieldCount) + { + long? version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION && size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + dataOffset += size; + } + return version; + } + + protected void ParseFields(Txn txn, Key key, bool hasWrite) + { + if (txn == null) + { + SkipFields(fieldCount); + return; + } + + long? 
version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION) + { + if (size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + else + { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + if (hasWrite) + { + txn.OnWrite(key, version, resultCode); + } + else + { + txn.OnRead(key, version); + } + } + + protected void ParseTxnDeadline(Txn txn) + { + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.MRT_DEADLINE) + { + int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); + txn.Deadline = deadline; + } + dataOffset += size; + } + } + + protected void SkipFields(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. 
+ for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + public static bool BatchInDoubt(bool isWrite, int commandSentCounter) + { + return isWrite && commandSentCounter > 1; + } + + public interface BatchOffsets + { + int Size(); + int Get(int i); + } + + private class BatchOffsetsNative : BatchOffsets + { + private int size; + private int[] offsets; + + public BatchOffsetsNative(BatchNode batch) + { + this.size = batch.offsetsSize; + this.offsets = batch.offsets; + } + + public int Size() + { + return size; + } + + public int Get(int i) + { + return offsets[i]; + } + } + } +} +#pragma warning restore 0618 diff --git a/AerospikeClient/Command/DeleteCommand.cs b/AerospikeClient/Command/DeleteCommand.cs index de56574e..ce1a005e 100644 --- a/AerospikeClient/Command/DeleteCommand.cs +++ b/AerospikeClient/Command/DeleteCommand.cs @@ -1,96 +1,71 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class DeleteCommand : SyncCommand - { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; - private bool existed; - - public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - cluster.AddTran(); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode() - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetDelete(writePolicy, key); - } - - protected internal override void ParseResult(Connection conn) - { - ParseHeader(conn); - - if (resultCode == 0) - { - existed = true; - return; - } - - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - existed = false; - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - existed = true; - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - - public bool Existed() - { - return existed; - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using System; + +namespace Aerospike.Client +{ + public sealed class DeleteCommand : SyncWriteCommand + { + private bool existed; + + public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + } + + protected internal override void WriteBuffer() + { + SetDelete(writePolicy, key); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + existed = true; + return; + } + + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + existed = false; + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + existed = true; + return; + } + + throw new AerospikeException(resultCode); + } + + public bool Existed() + { + return existed; + } + } +} diff --git a/AerospikeClient/Command/ExecuteCommand.cs b/AerospikeClient/Command/ExecuteCommand.cs index a92bfe3b..6637d24a 100644 --- a/AerospikeClient/Command/ExecuteCommand.cs +++ b/AerospikeClient/Command/ExecuteCommand.cs @@ -15,14 +15,16 @@ * the License. 
namespace Aerospike.Client
{
	/// <summary>
	/// Synchronous server-side UDF execution command. Sends the package/function/args
	/// to the node owning the key and parses the returned record, translating embedded
	/// UDF failures into <see cref="AerospikeException"/>s.
	/// </summary>
	public sealed class ExecuteCommand : SyncWriteCommand
	{
		private readonly string packageName;
		private readonly string functionName;
		private readonly Value[] args;

		/// <summary>Record parsed from the server response; null until ParseResult succeeds.</summary>
		public Record Record { get; private set; }

		public ExecuteCommand
		(
			Cluster cluster,
			WritePolicy writePolicy,
			Key key,
			string packageName,
			string functionName,
			Value[] args
		) : base(cluster, writePolicy, key)
		{
			this.packageName = packageName;
			this.functionName = functionName;
			this.args = args;
		}

		protected internal override void WriteBuffer()
		{
			SetUdf(writePolicy, key, packageName, functionName, args);
		}

		protected internal override void ParseResult(Connection conn)
		{
			ParseHeader(conn);
			// Writes must report the new record version (or failure) to the transaction.
			ParseFields(policy.Txn, key, true);

			if (resultCode == ResultCode.OK)
			{
				Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false);
				return;
			}

			if (resultCode == ResultCode.UDF_BAD_RESPONSE)
			{
				// The failure details ride in the record's FAILURE bin; parse it first.
				Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false);
				HandleUdfError(resultCode);
				return;
			}

			if (resultCode == ResultCode.FILTERED_OUT)
			{
				if (policy.failOnFilteredOut)
				{
					throw new AerospikeException(resultCode);
				}
				return;
			}

			throw new AerospikeException(resultCode);
		}

		/// <summary>
		/// Convert a UDF_BAD_RESPONSE record into an exception. The server encodes the
		/// failure as "file:line function message" in the FAILURE bin.
		/// </summary>
		/// <exception cref="AerospikeException">always thrown; with the parsed UDF error
		/// code/message when parsable, otherwise with the raw FAILURE string.</exception>
		private void HandleUdfError(int resultCode)
		{
			// Guard against a missing FAILURE bin; the raw indexer would throw
			// KeyNotFoundException instead of the intended AerospikeException.
			if (!Record.bins.TryGetValue("FAILURE", out object obj) || !(obj is string ret) || ret == null)
			{
				throw new AerospikeException(resultCode);
			}

			string message;
			int code;

			try
			{
				string[] list = ret.Split(':');
				// Convert.ToInt32 throws on malformed input so parse failures reach
				// the catch below (TryParse would silently yield code 0).
				code = Convert.ToInt32(list[2].Trim());
				message = list[0] + ':' + list[1] + ' ' + list[3];
			}
			catch (Exception e)
			{
				// Use generic exception if parse error occurs; chain the cause.
				throw new AerospikeException(resultCode, ret, e);
			}

			throw new AerospikeException(code, message);
		}
	}
}
PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - public bool Exists() { return exists; diff --git a/AerospikeClient/Command/FieldType.cs b/AerospikeClient/Command/FieldType.cs index 75cd5fa3..7b4b01d0 100644 --- a/AerospikeClient/Command/FieldType.cs +++ b/AerospikeClient/Command/FieldType.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -21,8 +21,11 @@ public sealed class FieldType public const int NAMESPACE = 0; public const int TABLE = 1; public const int KEY = 2; + public const int RECORD_VERSION = 3; public const int DIGEST_RIPE = 4; - public const int TRAN_ID = 7; // user supplied transaction id, which is simply passed back + public const int MRT_ID = 5; + public const int MRT_DEADLINE = 6; + public const int QUERY_ID = 7; public const int SOCKET_TIMEOUT = 9; public const int RECORDS_PER_SECOND = 10; public const int PID_ARRAY = 11; diff --git a/AerospikeClient/Command/MultiCommand.cs b/AerospikeClient/Command/MultiCommand.cs index c95ceabb..f2178cfe 100644 --- a/AerospikeClient/Command/MultiCommand.cs +++ b/AerospikeClient/Command/MultiCommand.cs @@ -27,12 +27,7 @@ public abstract class MultiCommand : SyncCommand protected internal readonly String ns; private readonly ulong clusterKey; protected internal int info3; - protected internal int resultCode; - protected internal int generation; - protected internal int expiration; protected internal int batchIndex; - protected internal int fieldCount; - protected internal int opCount; protected internal readonly bool isOperation; private readonly bool first; protected internal volatile bool valid = true; diff --git a/AerospikeClient/Command/OperateArgs.cs b/AerospikeClient/Command/OperateArgs.cs index c13edab7..bdf42800 100644 --- a/AerospikeClient/Command/OperateArgs.cs +++ b/AerospikeClient/Command/OperateArgs.cs @@ 
-1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -30,7 +30,6 @@ public OperateArgs WritePolicy policy, WritePolicy writeDefault, WritePolicy readDefault, - Key key, Operation[] operations ) { @@ -131,17 +130,5 @@ Operation[] operations } writeAttr = wattr; } - - public Partition GetPartition(Cluster cluster, Key key) - { - if (hasWrite) - { - return Partition.Write(cluster, writePolicy, key); - } - else - { - return Partition.Read(cluster, writePolicy, key); - } - } } } diff --git a/AerospikeClient/Command/OperateCommandRead.cs b/AerospikeClient/Command/OperateCommandRead.cs new file mode 100644 index 00000000..f20f2ec7 --- /dev/null +++ b/AerospikeClient/Command/OperateCommandRead.cs @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class OperateCommandRead : ReadCommand + { + private readonly OperateArgs args; + + public OperateCommandRead(Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key, true) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetOperate(args.writePolicy, key, args); + } + } +} diff --git a/AerospikeClient/Command/OperateCommandWrite.cs b/AerospikeClient/Command/OperateCommandWrite.cs new file mode 100644 index 00000000..2eec1e56 --- /dev/null +++ b/AerospikeClient/Command/OperateCommandWrite.cs @@ -0,0 +1,61 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +using Aerospike.Client; + +namespace Aerospike.Client +{ + public sealed class OperateCommandWrite : SyncWriteCommand + { + private readonly OperateArgs args; + public Record Record { get; private set; } + + public OperateCommandWrite(Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetOperate(args.writePolicy, key, args); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) { + Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, true); + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + + } +} diff --git a/AerospikeClient/Command/ReadCommand.cs b/AerospikeClient/Command/ReadCommand.cs index 8dfc8a57..28d40570 100644 --- a/AerospikeClient/Command/ReadCommand.cs +++ b/AerospikeClient/Command/ReadCommand.cs @@ -17,52 +17,31 @@ namespace Aerospike.Client { - public class ReadCommand : SyncCommand + public class ReadCommand : SyncReadCommand { - protected readonly Key key; - protected readonly Partition partition; private readonly string[] binNames; private readonly bool isOperation; private Record record; public ReadCommand(Cluster cluster, Policy policy, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; this.binNames = null; - this.partition = Partition.Read(cluster, policy, key); this.isOperation = false; - cluster.AddTran(); } public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; this.binNames = binNames; - this.partition = Partition.Read(cluster, policy, key); this.isOperation = 
false; - cluster.AddTran(); } - public ReadCommand(Cluster cluster, Policy policy, Key key, Partition partition, bool isOperation) - : base(cluster, policy) + public ReadCommand(Cluster cluster, Policy policy, Key key, bool isOperation) + : base(cluster, policy, key) { - this.key = key; this.binNames = null; - this.partition = partition; this.isOperation = isOperation; - cluster.AddTran(); - } - - protected internal override Node GetNode() - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; } protected internal override void WriteBuffer() @@ -73,23 +52,16 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(Connection conn) { ParseHeader(conn); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - if (opCount == 0) - { - // Bin data was not returned. - record = new Record(null, generation, expiration); - return; - } - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); + this.record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); return; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - HandleNotFound(resultCode); return; } @@ -102,56 +74,8 @@ record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, ge return; } - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - HandleUdfError(resultCode); - return; - } - throw new AerospikeException(resultCode); } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - - protected internal virtual void HandleNotFound(int resultCode) - { - // Do nothing in default 
case. Record will be null. - } - - private void HandleUdfError(int resultCode) - { - object obj; - - if (!record.bins.TryGetValue("FAILURE", out obj)) - { - throw new AerospikeException(resultCode); - } - - string ret = (string)obj; - string message; - int code; - - try - { - string[] list = ret.Split(':'); - code = Convert.ToInt32(list[2].Trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception e) - { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret, e); - } - - throw new AerospikeException(code, message); - } - public Record Record { get diff --git a/AerospikeClient/Command/ReadHeaderCommand.cs b/AerospikeClient/Command/ReadHeaderCommand.cs index 6fde67f9..dfeceb15 100644 --- a/AerospikeClient/Command/ReadHeaderCommand.cs +++ b/AerospikeClient/Command/ReadHeaderCommand.cs @@ -17,28 +17,13 @@ namespace Aerospike.Client { - public sealed class ReadHeaderCommand : SyncCommand + public sealed class ReadHeaderCommand : SyncReadCommand { - private readonly Key key; - private readonly Partition partition; private Record record; public ReadHeaderCommand(Cluster cluster, Policy policy, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); - } - - protected internal override Node GetNode() - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; } protected internal override void WriteBuffer() @@ -49,8 +34,8 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(Connection conn) { ParseHeader(conn); - - if (resultCode == 0) + ParseFields(policy.Txn, key, false); + if (resultCode == ResultCode.OK) { record = new Record(null, generation, expiration); return; @@ -73,12 +58,6 @@ protected internal override void ParseResult(Connection conn) throw new 
AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - public Record Record { get diff --git a/AerospikeClient/Command/ScanExecutor.cs b/AerospikeClient/Command/ScanExecutor.cs index 8380d849..a50db933 100644 --- a/AerospikeClient/Command/ScanExecutor.cs +++ b/AerospikeClient/Command/ScanExecutor.cs @@ -24,7 +24,7 @@ public sealed class ScanExecutor { public static void ScanPartitions(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, PartitionTracker tracker) { - cluster.AddTran(); + cluster.AddCommandCount(); while (true) { diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs index 356b92eb..08cca1ac 100644 --- a/AerospikeClient/Command/SyncCommand.cs +++ b/AerospikeClient/Command/SyncCommand.cs @@ -1,439 +1,442 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System.Net.Sockets; -using static Aerospike.Client.Latency; - -namespace Aerospike.Client -{ - public abstract class SyncCommand : Command - { - protected readonly Cluster cluster; - protected readonly Policy policy; - internal int iteration = 1; - internal int commandSentCounter; - internal DateTime deadline; - protected int resultCode; - protected int generation; - protected int expiration; - protected int fieldCount; - protected int opCount; - - /// - /// Default constructor. - /// - public SyncCommand(Cluster cluster, Policy policy) - : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries) - { - this.cluster = cluster; - this.policy = policy; - this.deadline = DateTime.MinValue; - } - - /// - /// Scan/Query constructor. - /// - public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout) - : base(socketTimeout, totalTimeout, 0) - { - this.cluster = cluster; - this.policy = policy; - this.deadline = DateTime.MinValue; - } - - public virtual void Execute() - { - if (totalTimeout > 0) - { - deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout); - } - ExecuteCommand(); - } - - public void ExecuteCommand() - { - Node node; - AerospikeException exception = null; - ValueStopwatch metricsWatch = new(); - LatencyType latencyType = cluster.MetricsEnabled ? GetLatencyType() : LatencyType.NONE; - bool isClientTimeout; - - // Execute command until successful, timed out or maximum iterations have been reached. - while (true) - { - try - { - node = GetNode(); - } - catch (AerospikeException ae) - { - ae.Policy = policy; - ae.Iteration = iteration; - ae.SetInDoubt(IsWrite(), commandSentCounter); - throw; - } - - try - { - node.ValidateErrorCount(); - if (latencyType != LatencyType.NONE) - { - metricsWatch = ValueStopwatch.StartNew(); - } - Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay); - - try - { - // Set command buffer. - WriteBuffer(); - - // Send command. 
- conn.Write(dataBuffer, dataOffset); - commandSentCounter++; - - // Parse results. - ParseResult(conn); - - // Put connection back in pool. - node.PutConnection(conn); - - if (latencyType != LatencyType.NONE) - { - node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds); - } - - // Command has completed successfully. Exit method. - return; - } - catch (AerospikeException ae) - { - if (ae.KeepConnection()) - { - // Put connection back in pool. - node.PutConnection(conn); - } - else - { - // Close socket to flush out possible garbage. Do not put back in pool. - node.CloseConnectionOnError(conn); - } - - if (ae.Result == ResultCode.TIMEOUT) - { - // Retry on server timeout. - exception = new AerospikeException.Timeout(policy, false); - isClientTimeout = false; - node.IncrErrorRate(); - node.AddTimeout(); - } - else if (ae.Result == ResultCode.DEVICE_OVERLOAD) - { - // Add to circuit breaker error count and retry. - exception = ae; - isClientTimeout = false; - node.IncrErrorRate(); - node.AddError(); - } - else - { - node.AddError(); - throw; - } - } - catch (Connection.ReadTimeout crt) - { - if (policy.TimeoutDelay > 0) - { - cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle())); - conn = null; - } - else - { - node.CloseConnection(conn); - } - exception = new AerospikeException.Timeout(policy, true); - isClientTimeout = true; - node.AddTimeout(); - } - catch (SocketException se) - { - // Socket errors are considered temporary anomalies. - // Retry after closing connection. - node.CloseConnectionOnError(conn); - - if (se.SocketErrorCode == SocketError.TimedOut) - { - isClientTimeout = true; - node.AddTimeout(); - } - else - { - exception = new AerospikeException.Connection(se); - isClientTimeout = false; - node.AddError(); - } - } - catch (IOException ioe) - { - // IO errors are considered temporary anomalies. Retry. 
- // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); - node.CloseConnection(conn); - exception = new AerospikeException.Connection(ioe); - isClientTimeout = false; - node.AddError(); - } - catch (Exception) - { - // All other exceptions are considered fatal. Do not retry. - // Close socket to flush out possible garbage. Do not put back in pool. - node.CloseConnectionOnError(conn); - node.AddError(); - throw; - } - } - catch (SocketException se) - { - // This exception might happen after initial connection succeeded, but - // user login failed with a socket error. Retry. - if (se.SocketErrorCode == SocketError.TimedOut) - { - isClientTimeout = true; - node.AddTimeout(); - } - else - { - exception = new AerospikeException.Connection(se); - isClientTimeout = false; - node.AddError(); - } - } - catch (IOException ioe) - { - // IO errors are considered temporary anomalies. Retry. - // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); - exception = new AerospikeException.Connection(ioe); - isClientTimeout = false; - node.AddError(); - } - catch (Connection.ReadTimeout) - { - // Connection already handled. - exception = new AerospikeException.Timeout(policy, true); - isClientTimeout = true; - node.AddTimeout(); - } - catch (AerospikeException.Connection ce) - { - // Socket connection error has occurred. Retry. - exception = ce; - isClientTimeout = false; - node.AddError(); - } - catch (AerospikeException.Backoff be) - { - // Node is in backoff state. Retry, hopefully on another node. - exception = be; - isClientTimeout = false; - node.AddError(); - } - catch (AerospikeException ae) - { - ae.Node = node; - ae.Policy = policy; - ae.Iteration = iteration; - ae.SetInDoubt(IsWrite(), commandSentCounter); - node.AddError(); - throw; - } - catch (Exception) - { - node.AddError(); - throw; - } - - // Check maxRetries. 
- if (iteration > maxRetries) - { - break; - } - - if (totalTimeout > 0) - { - // Check for total timeout. - long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries; - - if (remaining <= 0) - { - break; - } - - if (remaining < totalTimeout) - { - totalTimeout = (int)remaining; - - if (socketTimeout > totalTimeout) - { - socketTimeout = totalTimeout; - } - } - } - - if (!isClientTimeout && policy.sleepBetweenRetries > 0) - { - // Sleep before trying again. - Util.Sleep(policy.sleepBetweenRetries); - } - - iteration++; - - if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE)) - { - // Batch may be retried in separate commands. - if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter)) - { - // Batch was retried in separate commands. Complete this command. - return; - } - } - - cluster.AddRetry(); - } - - // Retries have been exhausted. Throw last exception. - if (isClientTimeout) - { - exception = new AerospikeException.Timeout(policy, true); - } - exception.Node = node; - exception.Policy = policy; - exception.Iteration = iteration; - exception.SetInDoubt(IsWrite(), commandSentCounter); - throw exception; - } - - protected internal sealed override int SizeBuffer() - { - dataBuffer = ThreadLocalData.GetBuffer(); - - if (dataOffset > dataBuffer.Length) - { - dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset); - } - dataOffset = 0; - return dataBuffer.Length; - } - - protected internal void SizeBuffer(int size) - { - if (size > dataBuffer.Length) - { - dataBuffer = ThreadLocalData.ResizeBuffer(size); - } - } - - protected internal sealed override void End() - { - // Write total size of message. - ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); - ByteUtil.LongToBytes(size, dataBuffer, 0); - } - - protected internal void ParseHeader(Connection conn) - { - // Read header. 
- conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); - - long sz = ByteUtil.BytesToLong(dataBuffer, 0); - int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - - if (receiveSize <= 0) - { - throw new AerospikeException("Invalid receive size: " + receiveSize); - } - - SizeBuffer(receiveSize); - conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); - conn.UpdateLastUsed(); - - ulong type = (ulong)((sz >> 48) & 0xff); - - if (type == Command.AS_MSG_TYPE) - { - dataOffset = 5; - } - else if (type == Command.MSG_TYPE_COMPRESSED) - { - int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); - byte[] ubuf = new byte[usize]; - - ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); - dataBuffer = ubuf; - dataOffset = 13; - } - else - { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - - this.resultCode = dataBuffer[dataOffset]; - dataOffset++; - this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 8; - this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - } - - protected internal sealed override void SetLength(int length) - { - dataOffset = length; - } - - protected internal virtual bool RetryBatch - ( - Cluster cluster, - int socketTimeout, - int totalTimeout, - DateTime deadline, - int iteration, - int commandSentCounter - ) - { - // Override this method in batch to regenerate node assignments. 
- return false; - } - - protected internal virtual bool IsWrite() - { - return false; - } - - protected virtual bool IsSingle() - { - return true; - } - - protected internal abstract Node GetNode(); - - protected abstract LatencyType GetLatencyType(); - protected internal abstract void WriteBuffer(); - protected internal abstract void ParseResult(Connection conn); - protected internal abstract bool PrepareRetry(bool timeout); - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System; +using System.Net.Sockets; +using System.Runtime.InteropServices; +using static Aerospike.Client.Latency; + +namespace Aerospike.Client +{ + public abstract class SyncCommand : Command + { + protected readonly Cluster cluster; + protected readonly Policy policy; + internal int iteration = 1; + internal int commandSentCounter; + internal DateTime deadline; + + /// + /// Default constructor. + /// + public SyncCommand(Cluster cluster, Policy policy) + : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries) + { + this.cluster = cluster; + this.policy = policy; + this.deadline = DateTime.MinValue; + } + + /// + /// Scan/Query constructor. 
+ /// + public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout) + : base(socketTimeout, totalTimeout, 0) + { + this.cluster = cluster; + this.policy = policy; + this.deadline = DateTime.MinValue; + } + + public virtual void Execute() + { + if (totalTimeout > 0) + { + deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout); + } + ExecuteCommand(); + } + + public void ExecuteCommand() + { + Node node; + AerospikeException exception = null; + ValueStopwatch metricsWatch = new(); + LatencyType latencyType = cluster.MetricsEnabled ? GetLatencyType() : LatencyType.NONE; + bool isClientTimeout; + + // Execute command until successful, timed out or maximum iterations have been reached. + while (true) + { + try + { + node = GetNode(); + } + catch (AerospikeException ae) + { + ae.Policy = policy; + ae.Iteration = iteration; + ae.SetInDoubt(IsWrite(), commandSentCounter); + throw; + } + + try + { + node.ValidateErrorCount(); + if (latencyType != LatencyType.NONE) + { + metricsWatch = ValueStopwatch.StartNew(); + } + Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay); + + try + { + // Set command buffer. + WriteBuffer(); + + // Send command. + conn.Write(dataBuffer, dataOffset); + commandSentCounter++; + + // Parse results. + ParseResult(conn); + + // Put connection back in pool. + node.PutConnection(conn); + + if (latencyType != LatencyType.NONE) + { + node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds); + } + + // Command has completed successfully. Exit method. + return; + } + catch (AerospikeException ae) + { + if (ae.KeepConnection()) + { + // Put connection back in pool. + node.PutConnection(conn); + } + else + { + // Close socket to flush out possible garbage. Do not put back in pool. + node.CloseConnectionOnError(conn); + } + + if (ae.Result == ResultCode.TIMEOUT) + { + // Retry on server timeout. 
+ exception = new AerospikeException.Timeout(policy, false); + isClientTimeout = false; + node.IncrErrorRate(); + node.AddTimeout(); + } + else if (ae.Result == ResultCode.DEVICE_OVERLOAD) + { + // Add to circuit breaker error count and retry. + exception = ae; + isClientTimeout = false; + node.IncrErrorRate(); + node.AddError(); + } + else + { + node.AddError(); + throw; + } + } + catch (Connection.ReadTimeout crt) + { + if (policy.TimeoutDelay > 0) + { + cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle())); + conn = null; + } + else + { + node.CloseConnection(conn); + } + exception = new AerospikeException.Timeout(policy, true); + isClientTimeout = true; + node.AddTimeout(); + } + catch (SocketException se) + { + // Socket errors are considered temporary anomalies. + // Retry after closing connection. + node.CloseConnectionOnError(conn); + + if (se.SocketErrorCode == SocketError.TimedOut) + { + isClientTimeout = true; + node.AddTimeout(); + } + else + { + exception = new AerospikeException.Connection(se); + isClientTimeout = false; + node.AddError(); + } + } + catch (IOException ioe) + { + // IO errors are considered temporary anomalies. Retry. + // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); + node.CloseConnection(conn); + exception = new AerospikeException.Connection(ioe); + isClientTimeout = false; + node.AddError(); + } + catch (Exception) + { + // All other exceptions are considered fatal. Do not retry. + // Close socket to flush out possible garbage. Do not put back in pool. + node.CloseConnectionOnError(conn); + node.AddError(); + throw; + } + } + catch (SocketException se) + { + // This exception might happen after initial connection succeeded, but + // user login failed with a socket error. Retry. 
+ if (se.SocketErrorCode == SocketError.TimedOut) + { + isClientTimeout = true; + node.AddTimeout(); + } + else + { + exception = new AerospikeException.Connection(se); + isClientTimeout = false; + node.AddError(); + } + } + catch (IOException ioe) + { + // IO errors are considered temporary anomalies. Retry. + // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); + exception = new AerospikeException.Connection(ioe); + isClientTimeout = false; + node.AddError(); + } + catch (Connection.ReadTimeout) + { + // Connection already handled. + exception = new AerospikeException.Timeout(policy, true); + isClientTimeout = true; + node.AddTimeout(); + } + catch (AerospikeException.Connection ce) + { + // Socket connection error has occurred. Retry. + exception = ce; + isClientTimeout = false; + node.AddError(); + } + catch (AerospikeException.Backoff be) + { + // Node is in backoff state. Retry, hopefully on another node. + exception = be; + isClientTimeout = false; + node.AddError(); + } + catch (AerospikeException ae) + { + ae.Node = node; + ae.Policy = policy; + ae.Iteration = iteration; + ae.SetInDoubt(IsWrite(), commandSentCounter); + node.AddError(); + throw; + } + catch (Exception) + { + node.AddError(); + throw; + } + + // Check maxRetries. + if (iteration > maxRetries) + { + break; + } + + if (totalTimeout > 0) + { + // Check for total timeout. + long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries; + + if (remaining <= 0) + { + break; + } + + if (remaining < totalTimeout) + { + totalTimeout = (int)remaining; + + if (socketTimeout > totalTimeout) + { + socketTimeout = totalTimeout; + } + } + } + + if (!isClientTimeout && policy.sleepBetweenRetries > 0) + { + // Sleep before trying again. 
+ Util.Sleep(policy.sleepBetweenRetries); + } + + iteration++; + + if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE)) + { + // Batch may be retried in separate commands. + if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter)) + { + // Batch was retried in separate commands. Complete this command. + return; + } + } + + cluster.AddRetry(); + } + + // Retries have been exhausted. Throw last exception. + if (isClientTimeout) + { + exception = new AerospikeException.Timeout(policy, true); + } + exception.Node = node; + exception.Policy = policy; + exception.Iteration = iteration; + exception.SetInDoubt(IsWrite(), commandSentCounter); + throw exception; + } + + protected internal sealed override int SizeBuffer() + { + dataBuffer = ThreadLocalData.GetBuffer(); + + if (dataOffset > dataBuffer.Length) + { + dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset); + } + dataOffset = 0; + return dataBuffer.Length; + } + + protected internal void SizeBuffer(int size) + { + if (size > dataBuffer.Length) + { + dataBuffer = ThreadLocalData.ResizeBuffer(size); + } + } + + protected internal sealed override void End() + { + // Write total size of message. + ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); + ByteUtil.LongToBytes(size, dataBuffer, 0); + } + + protected internal void ParseHeader(Connection conn) + { + // Read header. 
+ conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); + + long sz = ByteUtil.BytesToLong(dataBuffer, 0); + int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); + + if (receiveSize <= 0) + { + throw new AerospikeException("Invalid receive size: " + receiveSize); + } + + SizeBuffer(receiveSize); + conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); + conn.UpdateLastUsed(); + + ulong type = (ulong)(sz >> 48) & 0xff; + + if (type == Command.AS_MSG_TYPE) + { + dataOffset = 5; + } + else if (type == Command.MSG_TYPE_COMPRESSED) + { + int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); + byte[] ubuf = new byte[usize]; + + ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); + dataBuffer = ubuf; + dataOffset = 13; + } + else + { + throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); + } + + this.resultCode = dataBuffer[dataOffset] & 0xFF; + dataOffset++; + this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 8; + this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + } + + protected internal sealed override void SetLength(int length) + { + dataOffset = length; + } + + // Do nothing by default. Write commands will override this method. + protected internal virtual void OnInDoubt() + { + + } + + protected internal virtual bool RetryBatch + ( + Cluster cluster, + int socketTimeout, + int totalTimeout, + DateTime deadline, + int iteration, + int commandSentCounter + ) + { + // Override this method in batch to regenerate node assignments. 
+ return false; + } + + protected internal virtual bool IsWrite() + { + return false; + } + + protected virtual bool IsSingle() + { + return true; + } + + protected internal abstract Node GetNode(); + + protected abstract LatencyType GetLatencyType(); + protected internal abstract void WriteBuffer(); + protected internal abstract void ParseResult(Connection conn); + protected internal abstract bool PrepareRetry(bool timeout); + } +} diff --git a/AerospikeClient/Command/SyncReadCommand.cs b/AerospikeClient/Command/SyncReadCommand.cs new file mode 100644 index 00000000..28d1c24b --- /dev/null +++ b/AerospikeClient/Command/SyncReadCommand.cs @@ -0,0 +1,53 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public abstract class SyncReadCommand : SyncCommand + { + protected readonly Key key; + private readonly Partition partition; + + public SyncReadCommand(Cluster cluster, Policy policy, Key key) + : base(cluster, policy) + { + this.key = key; + this.partition = Partition.Read(cluster, policy, key); + cluster.AddCommandCount(); + } + + protected internal override Node GetNode() + { + return partition.GetNodeRead(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.READ; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryRead(timeout); + return true; + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override void ParseResult(Connection conn); + } +} diff --git a/AerospikeClient/Command/OperateCommand.cs b/AerospikeClient/Command/SyncWriteCommand.cs similarity index 51% rename from AerospikeClient/Command/OperateCommand.cs rename to AerospikeClient/Command/SyncWriteCommand.cs index 45737711..3ba42540 100644 --- a/AerospikeClient/Command/OperateCommand.cs +++ b/AerospikeClient/Command/SyncWriteCommand.cs @@ -15,59 +15,56 @@ * the License. 
*/ +using System.Runtime.InteropServices; + namespace Aerospike.Client { - public sealed class OperateCommand : ReadCommand + public abstract class SyncWriteCommand : SyncCommand { - private readonly OperateArgs args; + protected readonly WritePolicy writePolicy; + protected readonly Key key; + private readonly Partition partition; - public OperateCommand(Cluster cluster, Key key, OperateArgs args) - : base(cluster, args.writePolicy, key, args.GetPartition(cluster, key), true) + public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy) { - this.args = args; + this.writePolicy = writePolicy; + this.key = key; + this.partition = Partition.Write(cluster, writePolicy, key); + cluster.AddCommandCount(); } protected internal override bool IsWrite() { - return args.hasWrite; + return true; } protected internal override Node GetNode() { - return args.hasWrite ? partition.GetNodeWrite(cluster) : partition.GetNodeRead(cluster); + return partition.GetNodeWrite(cluster); } protected override Latency.LatencyType GetLatencyType() { - return args.hasWrite ? Latency.LatencyType.WRITE : Latency.LatencyType.READ; + return Latency.LatencyType.WRITE; } - protected internal override void WriteBuffer() + protected internal override bool PrepareRetry(bool timeout) { - SetOperate(args.writePolicy, key, args); + partition.PrepareRetryWrite(timeout); + return true; } - protected internal override void HandleNotFound(int resultCode) + protected internal override void OnInDoubt() { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. 
- if (args.hasWrite) + if (writePolicy.Txn != null) { - throw new AerospikeException(resultCode); + writePolicy.Txn.OnWriteInDoubt(key); } } - protected internal override bool PrepareRetry(bool timeout) - { - if (args.hasWrite) - { - partition.PrepareRetryWrite(timeout); - } - else - { - partition.PrepareRetryRead(timeout); - } - return true; - } + protected internal abstract override void WriteBuffer(); + + protected internal abstract override void ParseResult(Connection conn); } } diff --git a/AerospikeClient/Command/TouchCommand.cs b/AerospikeClient/Command/TouchCommand.cs index 675f0e2a..1d1913bf 100644 --- a/AerospikeClient/Command/TouchCommand.cs +++ b/AerospikeClient/Command/TouchCommand.cs @@ -1,106 +1,74 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class TouchCommand : SyncCommand - { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; - private readonly bool failOnNotFound; - internal bool Touched { get; private set; } - - public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - this.failOnNotFound = true; - cluster.AddTran(); - } - - public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key, bool failOnNotFound) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - this.failOnNotFound = failOnNotFound; - cluster.AddTran(); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode() - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetTouch(writePolicy, key); - } - - protected internal override void ParseResult(Connection conn) - { - ParseHeader(conn); - - if (resultCode == 0) - { - Touched = true; - return; - } - - Touched = false; - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - if (failOnNotFound) - { - throw new AerospikeException(resultCode); - } - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. 
under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TouchCommand : SyncWriteCommand + { + private readonly bool failOnNotFound; + internal bool Touched { get; private set; } + public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + this.failOnNotFound = true; + } + + public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key, bool failOnNotFound) + : base(cluster, writePolicy, key) + { + this.failOnNotFound = failOnNotFound; + } + + protected internal override void WriteBuffer() + { + SetTouch(writePolicy, key); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + Touched = true; + return; + } + + Touched = false; + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + if (failOnNotFound) + { + throw new AerospikeException(resultCode); + } + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs new file mode 100644 index 00000000..51d55c3f --- /dev/null +++ b/AerospikeClient/Command/TxnAddKeys.cs @@ -0,0 +1,50 @@ 
+/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TxnAddKeys : SyncWriteCommand + { + private readonly OperateArgs args; + private readonly Txn txn; + + public TxnAddKeys (Cluster cluster, Key key, OperateArgs args, Txn txn) + : base(cluster, args.writePolicy, key) + { + this.args = args; + this.txn = txn; + } + + protected internal override void WriteBuffer() + { + SetTxnAddKeys(args.writePolicy, key, args); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseTxnDeadline(txn); + + if (resultCode == ResultCode.OK) + { + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TxnClose.cs b/AerospikeClient/Command/TxnClose.cs new file mode 100644 index 00000000..73df0db0 --- /dev/null +++ b/AerospikeClient/Command/TxnClose.cs @@ -0,0 +1,52 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TxnClose : SyncWriteCommand + { + private readonly Txn txn; + + public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + this.txn = txn; + } + + protected internal override void WriteBuffer() + { + SetTxnClose(txn, key); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + } +} diff --git a/AerospikeClient/Command/TxnMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs new file mode 100644 index 00000000..a59b86a6 --- /dev/null +++ b/AerospikeClient/Command/TxnMarkRollForward.cs @@ -0,0 +1,51 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TxnMarkRollForward : SyncWriteCommand + { + public TxnMarkRollForward(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + } + + protected internal override void WriteBuffer() + { + SetTxnMarkRollForward(key); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + // MRT_COMMITTED is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. + if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) + { + return; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + } +} diff --git a/AerospikeClient/Command/TxnMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs new file mode 100644 index 00000000..b0826e3b --- /dev/null +++ b/AerospikeClient/Command/TxnMonitor.cs @@ -0,0 +1,171 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TxnMonitor + { + private static readonly ListPolicy OrderedListPolicy = new(ListOrder.ORDERED, + ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); + + private static readonly string BinNameId = "id"; + private static readonly string BinNameDigests = "keyds"; + + public static void AddKey(Cluster cluster, WritePolicy policy, Key cmdKey) + { + Txn txn = policy.Txn; + + if (txn.Writes.Contains(cmdKey)) + { + // Transaction monitor already contains this key. + return; + } + + Operation[] ops = GetTxnOps(txn, cmdKey); + AddWriteKeys(cluster, policy, ops); + } + + public static void AddKeys(Cluster cluster, BatchPolicy policy, Key[] keys) + { + Operation[] ops = GetTxnOps(policy.Txn, keys); + AddWriteKeys(cluster, policy, ops); + } + + public static void AddKeys(Cluster cluster, BatchPolicy policy, List records) + { + Operation[] ops = GetTxnOps(policy.Txn, records); + + if (ops != null) + { + AddWriteKeys(cluster, policy, ops); + } + } + + public static Operation[] GetTxnOps(Txn txn, Key cmdKey) + { + txn.VerifyCommand(); + txn.SetNamespace(cmdKey.ns); + + if (txn.MonitorExists()) + { + // No existing monitor record. 
+ return new Operation[] { + ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest)) + }; + } + else + { + return new Operation[] { + Operation.Put(new Bin(BinNameId, txn.Id)), + ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest)) + }; + } + } + + public static Operation[] GetTxnOps(Txn txn, Key[] keys) + { + txn.VerifyCommand(); + + List list = new(keys.Length); + + foreach (Key key in keys) + { + txn.SetNamespace(key.ns); + list.Add(Value.Get(key.digest)); + } + return GetTxnOps(txn, list); + } + + public static Operation[] GetTxnOps(Txn txn, List records) + { + txn.VerifyCommand(); + + List list = new(records.Count); + + foreach (BatchRecord br in records) { + txn.SetNamespace(br.key.ns); + + if (br.hasWrite) + { + list.Add(Value.Get(br.key.digest)); + } + } + + if (list.Count == 0) + { + // Readonly batch does not need to add key digests. + return null; + } + return GetTxnOps(txn, list); + } + + private static Operation[] GetTxnOps(Txn txn, List list) + { + if (txn.MonitorExists()) + { + // No existing monitor record. 
+ return new Operation[] { + ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + else + { + return new Operation[] { + Operation.Put(new Bin(BinNameId, txn.Id)), + ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + } + + private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops) + { + Txn txn = policy.Txn; + Key txnKey = GetTxnMonitorKey(policy.Txn); + WritePolicy wp = CopyTimeoutPolicy(policy); + OperateArgs args = new(wp, null, null, ops); + TxnAddKeys cmd = new(cluster, txnKey, args, txn); + cmd.Execute(); + } + + public static Key GetTxnMonitorKey(Txn txn) + { + return new Key(txn.Ns, " + { + if (max == 0) return false; + + records = new BatchRecord[max]; + keys = new Key[max]; + versions = new long?[max]; + return true; + }, + (key, value, count) => + { + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = value; + }); + + if (!actionPerformed) // If no action was performed, there are no elements. Return. + { + return; + } + + this.verifyRecords = records; + + BatchStatus status = new(true); + List bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + int count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTxnVerify( + cluster, bn, verifyPolicy, keys, versions, records, status); + } + + BatchExecutor.Execute(cluster, verifyPolicy, commands, status); + + if (!status.GetStatus()) + { + throw new AerospikeException("Failed to verify one or more record versions"); + } + } + + private void MarkRollForward(WritePolicy writePolicy, Key txnKey) + { + // Tell MRT monitor that a roll-forward will commence. 
+ TxnMarkRollForward cmd = new(cluster, txn, writePolicy, txnKey); + cmd.Execute(); + } + + private void Roll(BatchPolicy rollPolicy, int txnAttr) + { + BatchRecord[] records = null; + Key[] keys = null; + + bool actionPerformed = txn.Writes.PerformActionOnEachElement(max => + { + if (max == 0) return false; + + records = new BatchRecord[max]; + keys = new Key[max]; + return true; + }, + (item, count) => + { + keys[count] = item; + records[count] = new BatchRecord(item, true); + }); + + if (!actionPerformed) + { + return; + } + + this.rollRecords = records; + + BatchAttr attr = new(); + attr.SetTxn(txnAttr); + BatchStatus status = new(true); + + // generate() requires a null transaction instance. + List bns = BatchNode.GenerateList(cluster, rollPolicy, keys, records, true, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + int count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTxnRoll( + cluster, bn, rollPolicy, txn, keys, records, attr, status); + } + BatchExecutor.Execute(cluster, rollPolicy, commands, status); + + if (!status.GetStatus()) + { + string rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; + throw new AerospikeException("Failed to " + rollString + " one or more records"); + } + } + + private void Close(WritePolicy writePolicy, Key txnKey) + { + // Delete MRT monitor on server. + TxnClose cmd = new(cluster, txn, writePolicy, txnKey); + cmd.Execute(); + + // Reset MRT on client. + txn.Clear(); + } + } +} diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs index 018fc94d..5f275729 100644 --- a/AerospikeClient/Command/WriteCommand.cs +++ b/AerospikeClient/Command/WriteCommand.cs @@ -1,86 +1,59 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class WriteCommand : SyncCommand - { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; - private readonly Bin[] bins; - private readonly Operation.Type operation; - - public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - this.bins = bins; - this.operation = operation; - cluster.AddTran(); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode() - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - - protected internal override void WriteBuffer() - { - SetWrite(writePolicy, operation, key, bins); - } - - protected internal override void ParseResult(Connection conn) - { - ParseHeader(conn); - - if (resultCode == 0) - { - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - 
return true; - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class WriteCommand : SyncWriteCommand + { + private readonly Bin[] bins; + private readonly Operation.Type operation; + + public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) + : base(cluster, writePolicy, key) + { + this.bins = bins; + this.operation = operation; + } + + protected internal override void WriteBuffer() + { + SetWrite(writePolicy, operation, key, bins); + } + + protected internal override void ParseResult(Connection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Exp/Exp.cs b/AerospikeClient/Exp/Exp.cs index 692077b6..dd3e0de9 100644 --- a/AerospikeClient/Exp/Exp.cs +++ b/AerospikeClient/Exp/Exp.cs @@ -408,12 +408,12 @@ public static Exp TTL() } /// - /// Create expression that returns if record has been deleted and is still in tombstone state. 
+ /// Create expression that returns if record has been deleted and is still in tombstone State. /// This expression usually evaluates quickly because record meta data is cached in memory. /// /// /// - /// // Deleted records that are in tombstone state. + /// // Deleted records that are in tombstone State. /// Exp.isTombstone() /// /// diff --git a/AerospikeClient/Listener/AbortListener.cs b/AerospikeClient/Listener/AbortListener.cs new file mode 100644 index 00000000..b5bb878b --- /dev/null +++ b/AerospikeClient/Listener/AbortListener.cs @@ -0,0 +1,31 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Client +{ + /// + /// Asynchronous result notifications for multi-record transaction (MRT) aborts. + /// + public interface AbortListener + { + /// + /// This method is called when the abort succeeded or will succeed. + /// + void OnSuccess(AbortStatusType status); + } +} diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs new file mode 100644 index 00000000..2629c7de --- /dev/null +++ b/AerospikeClient/Listener/CommitListener.cs @@ -0,0 +1,37 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using static Aerospike.Client.CommitStatus; + +namespace Aerospike.Client +{ + /// + /// Asynchronous result notifications for multi-record transaction (MRT) commits. + /// + public interface CommitListener + { + /// + /// This method is called when the records are verified and the commit succeeded or will succeed. + /// + void OnSuccess(CommitStatusType status); + + /// + /// This method is called when the commit fails. + /// + /// error that occurred + void OnFailure(AerospikeException.Commit exception); + } +} diff --git a/AerospikeClient/Main/AbortStatus.cs b/AerospikeClient/Main/AbortStatus.cs new file mode 100644 index 00000000..94f2dee2 --- /dev/null +++ b/AerospikeClient/Main/AbortStatus.cs @@ -0,0 +1,47 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) abort error status code + /// + public static class AbortStatus + { + public enum AbortStatusType + { + OK, + ALREADY_COMMITTED, + ALREADY_ABORTED, + ROLL_BACK_ABANDONED, + CLOSE_ABANDONED + } + + public static string AbortErrorToString(AbortStatusType status) + { + return status switch + { + AbortStatusType.OK => "Abort succeeded.", + AbortStatusType.ALREADY_COMMITTED => "Already committed.", + AbortStatusType.ALREADY_ABORTED => "Already aborted.", + AbortStatusType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. Server will eventually abort the MRT.", + AbortStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected AbortStatusType." + }; + } + } +} diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index 617369a4..a91de549 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -1,2469 +1,2705 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System.Reflection; -using System.Text; - -namespace Aerospike.Client -{ - /// - /// Instantiate an AerospikeClient object to access an Aerospike - /// database cluster and perform database operations. - /// - /// This client is thread-safe. One client instance should be used per cluster. - /// Multiple threads should share this cluster instance. - /// - /// - /// Your application uses this class API to perform database operations such as - /// writing and reading records, and selecting sets of records. Write operations - /// include specialized functionality such as append/prepend and arithmetic - /// addition. - /// - /// - /// Each record may have multiple bins, unless the Aerospike server nodes are - /// configured as "single-bin". In "multi-bin" mode, partial records may be - /// written or read by specifying the relevant subset of bins. - /// - /// - public class AerospikeClient : IDisposable, IAerospikeClient - { - //------------------------------------------------------- - // Member variables. - //------------------------------------------------------- - - protected internal Cluster cluster; - - /// - /// Default read policy that is used when read command policy is null. - /// - public Policy readPolicyDefault; - - /// - /// Default write policy that is used when write command policy is null. - /// - public WritePolicy writePolicyDefault; - - /// - /// Default scan policy that is used when scan command policy is null. - /// - public ScanPolicy scanPolicyDefault; - - /// - /// Default query policy that is used when query command policy is null. - /// - public QueryPolicy queryPolicyDefault; - - /// - /// Default parent policy used in batch read commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy batchPolicyDefault; - - /// - /// Default parent policy used in batch write commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... 
- /// - public BatchPolicy batchParentPolicyWriteDefault; - - /// - /// Default write policy used in batch operate commands. - /// Write policy fields include generation, expiration, durableDelete, etc... - /// - public BatchWritePolicy batchWritePolicyDefault; - - /// - /// Default delete policy used in batch delete commands. - /// - public BatchDeletePolicy batchDeletePolicyDefault; - - /// - /// Default user defined function policy used in batch UDF excecute commands. - /// - public BatchUDFPolicy batchUDFPolicyDefault; - - /// - /// Default info policy that is used when info command policy is null. - /// - public InfoPolicy infoPolicyDefault; - - protected WritePolicy operatePolicyReadDefault; - - //------------------------------------------------------- - // Constructors - //------------------------------------------------------- - - /// - /// Initialize Aerospike client. - /// If the host connection succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails, the cluster will remain in a disconnected state - /// until the server is activated. - /// - /// - /// host name - /// host port - /// if host connection fails - public AerospikeClient(string hostname, int port) - : this(new ClientPolicy(), new Host(hostname, port)) - { - } - - /// - /// Initialize Aerospike client. - /// The client policy is used to set defaults and size internal data structures. - /// If the host connection succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// If the connection succeeds, the client is ready to process database requests. 
- /// If the connection fails and the policy's failOnInvalidHosts is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state - /// until the server is activated. - /// - /// - /// client configuration parameters, pass in null for defaults - /// host name - /// host port - /// if host connection fails - public AerospikeClient(ClientPolicy policy, string hostname, int port) - : this(policy, new Host(hostname, port)) - { - } - - /// - /// Initialize Aerospike client with suitable hosts to seed the cluster map. - /// The client policy is used to set defaults and size internal data structures. - /// For the first host connection that succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// In most cases, only one host is necessary to seed the cluster. The remaining hosts - /// are added as future seeds in case of a complete network failure. - /// - /// - /// If one connection succeeds, the client is ready to process database requests. - /// If all connections fail and the policy's failIfNotConnected is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state - /// until the server is activated. 
- /// - /// - /// client configuration parameters, pass in null for defaults - /// array of potential hosts to seed the cluster - /// if all host connections fail - public AerospikeClient(ClientPolicy policy, params Host[] hosts) - { - if (policy == null) - { - policy = new ClientPolicy(); - } - this.readPolicyDefault = policy.readPolicyDefault; - this.writePolicyDefault = policy.writePolicyDefault; - this.scanPolicyDefault = policy.scanPolicyDefault; - this.queryPolicyDefault = policy.queryPolicyDefault; - this.batchPolicyDefault = policy.batchPolicyDefault; - this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; - this.batchWritePolicyDefault = policy.batchWritePolicyDefault; - this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; - this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.infoPolicyDefault = policy.infoPolicyDefault; - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - - cluster = new Cluster(policy, hosts); - cluster.InitTendThread(policy.failIfNotConnected); - } - - /// - /// Construct client without initialization. - /// Should only be used by classes inheriting from this client. 
- /// - protected internal AerospikeClient(ClientPolicy policy) - { - if (policy != null) - { - this.readPolicyDefault = policy.readPolicyDefault; - this.writePolicyDefault = policy.writePolicyDefault; - this.scanPolicyDefault = policy.scanPolicyDefault; - this.queryPolicyDefault = policy.queryPolicyDefault; - this.batchPolicyDefault = policy.batchPolicyDefault; - this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; - this.batchWritePolicyDefault = policy.batchWritePolicyDefault; - this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; - this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.infoPolicyDefault = policy.infoPolicyDefault; - } - else - { - this.readPolicyDefault = new Policy(); - this.writePolicyDefault = new WritePolicy(); - this.scanPolicyDefault = new ScanPolicy(); - this.queryPolicyDefault = new QueryPolicy(); - this.batchPolicyDefault = BatchPolicy.ReadDefault(); - this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault(); - this.batchWritePolicyDefault = new BatchWritePolicy(); - this.batchDeletePolicyDefault = new BatchDeletePolicy(); - this.batchUDFPolicyDefault = new BatchUDFPolicy(); - this.infoPolicyDefault = new InfoPolicy(); - } - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - } - - //------------------------------------------------------- - // Operations policies - //------------------------------------------------------- - - /// - /// Default read policy that is used when read command policy is null. - /// - public Policy ReadPolicyDefault - { - get { return readPolicyDefault; } - set { readPolicyDefault = value; } - } - - /// - /// Default write policy that is used when write command policy is null. - /// - public WritePolicy WritePolicyDefault - { - get { return writePolicyDefault; } - set { writePolicyDefault = value; } - } - - /// - /// Default scan policy that is used when scan command policy is null. 
- /// - public ScanPolicy ScanPolicyDefault - { - get { return scanPolicyDefault; } - set { scanPolicyDefault = value; } - } - - /// - /// Default query policy that is used when query command policy is null. - /// - public QueryPolicy QueryPolicyDefault - { - get { return queryPolicyDefault; } - set { queryPolicyDefault = value; } - } - - /// - /// Default parent policy used in batch read commands.Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy BatchPolicyDefault - { - get { return batchPolicyDefault; } - set { batchPolicyDefault = value; } - } - - /// - /// Default parent policy used in batch write commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy BatchParentPolicyWriteDefault - { - get { return batchParentPolicyWriteDefault; } - set { batchParentPolicyWriteDefault = value; } - } - - /// - /// Default write policy used in batch operate commands. - /// Write policy fields include generation, expiration, durableDelete, etc... - /// - public BatchWritePolicy BatchWritePolicyDefault - { - get { return batchWritePolicyDefault; } - set { batchWritePolicyDefault = value; } - } - - /// - /// Default delete policy used in batch delete commands. - /// - public BatchDeletePolicy BatchDeletePolicyDefault - { - get { return batchDeletePolicyDefault; } - set { batchDeletePolicyDefault = value; } - } - - /// - /// Default user defined function policy used in batch UDF excecute commands. - /// - public BatchUDFPolicy BatchUDFPolicyDefault - { - get { return batchUDFPolicyDefault; } - set { batchUDFPolicyDefault = value; } - } - - /// - /// Default info policy that is used when info command policy is null. 
/// <summary>
/// Default info policy that is used when info command policy is null.
/// </summary>
public InfoPolicy InfoPolicyDefault
{
	get { return infoPolicyDefault; }
	set { infoPolicyDefault = value; }
}

//-------------------------------------------------------
// Cluster Connection Management
//-------------------------------------------------------

// True after Dispose() has run; guards against double disposal.
public bool Disposed { get; private set; }

private void Dispose(bool disposing)
{
	if (!Disposed)
	{
		if (disposing)
		{
			this.Close();
		}
		Disposed = true;
	}
}

/// <summary>
/// Close all client connections to database server nodes.
/// </summary>
public void Dispose()
{
	// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
	Dispose(disposing: true);
	GC.SuppressFinalize(this);
}

/// <summary>
/// Close all client connections to database server nodes.
/// </summary>
public void Close()
{
	cluster.Close();
}

/// <summary>
/// Return if we are ready to talk to the database server cluster.
/// </summary>
public bool Connected
{
	get { return cluster.Connected; }
}

/// <summary>
/// Cluster associated with this AerospikeClient instance.
/// </summary>
public Cluster Cluster
{
	get { return cluster; }
}

/// <summary>
/// Return array of active server nodes in the cluster.
/// </summary>
public Node[] Nodes
{
	get { return cluster.Nodes; }
}

/// <summary>
/// Enable extended periodic cluster and node latency metrics.
/// </summary>
public void EnableMetrics(MetricsPolicy metricsPolicy)
{
	cluster.EnableMetrics(metricsPolicy);
}

/// <summary>
/// Disable extended periodic cluster and node latency metrics.
/// </summary>
public void DisableMetrics()
{
	cluster.DisableMetrics();
}

/// <summary>
/// Return operating cluster statistics snapshot.
/// </summary>
public ClusterStats GetClusterStats()
{
	return cluster.GetStats();
}
/// <summary>
/// Write record bin(s).
/// The policy specifies the transaction timeout, record expiration and how the transaction is
/// handled when the record already exists.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="bins">array of bin name/value pairs</param>
/// <exception cref="AerospikeException">if write fails</exception>
public void Put(WritePolicy policy, Key key, params Bin[] bins)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE);
	command.Execute();
}

//-------------------------------------------------------
// String Operations
//-------------------------------------------------------

/// <summary>
/// Append bin string values to existing record bin values.
/// The policy specifies the transaction timeout, record expiration and how the transaction is
/// handled when the record already exists.
/// This call only works for string values.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="bins">array of bin name/value pairs</param>
/// <exception cref="AerospikeException">if append fails</exception>
public void Append(WritePolicy policy, Key key, params Bin[] bins)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND);
	command.Execute();
}
/// <summary>
/// Prepend bin string values to existing record bin values.
/// The policy specifies the transaction timeout, record expiration and how the transaction is
/// handled when the record already exists.
/// This call works only for string values.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="bins">array of bin name/value pairs</param>
/// <exception cref="AerospikeException">if prepend fails</exception>
public void Prepend(WritePolicy policy, Key key, params Bin[] bins)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND);
	command.Execute();
}

//-------------------------------------------------------
// Arithmetic Operations
//-------------------------------------------------------

/// <summary>
/// Add integer/double bin values to existing record bin values.
/// The policy specifies the transaction timeout, record expiration and how the transaction is
/// handled when the record already exists.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="bins">array of bin name/value pairs</param>
/// <exception cref="AerospikeException">if add fails</exception>
public void Add(WritePolicy policy, Key key, params Bin[] bins)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD);
	command.Execute();
}

//-------------------------------------------------------
// Delete Operations
//-------------------------------------------------------

/// <summary>
/// Delete record for specified key.
/// Return whether record existed on server before deletion.
/// The policy specifies the transaction timeout.
/// </summary>
/// <param name="policy">delete configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if delete fails</exception>
public bool Delete(WritePolicy policy, Key key)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	DeleteCommand command = new DeleteCommand(cluster, policy, key);
	command.Execute();
	return command.Existed();
}
/// <summary>
/// Delete records for specified keys. If a key is not found, the corresponding result
/// <see cref="BatchRecord.resultCode"/> will be <see cref="ResultCode.KEY_NOT_FOUND_ERROR"/>.
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="deletePolicy">delete configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <returns>BatchResults which contains results for keys that did complete</returns>
public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new BatchResults(new BatchRecord[0], true);
	}

	if (batchPolicy == null)
	{
		batchPolicy = batchParentPolicyWriteDefault;
	}

	if (deletePolicy == null)
	{
		deletePolicy = batchDeletePolicyDefault;
	}

	BatchAttr attr = new BatchAttr();
	attr.SetDelete(deletePolicy);

	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new BatchStatus(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, null, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error.
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}
/// <summary>
/// Remove records in specified namespace/set efficiently. This method is many orders of magnitude
/// faster than deleting records one at a time.
/// <para>
/// See https://www.aerospike.com/docs/reference/info#truncate
/// </para>
/// <para>
/// This asynchronous server call may return before the truncation is complete. The user can still
/// write new records after the server returns because new records will have last update times
/// greater than the truncate cutoff (set at the time of truncate call).
/// </para>
/// </summary>
/// <param name="policy">info command configuration parameters, pass in null for defaults</param>
/// <param name="ns">required namespace</param>
/// <param name="set">optional set name. Pass in null to delete all sets in namespace.</param>
/// <param name="beforeLastUpdate">
/// optionally delete records before record last update time.
/// If specified, value must be before the current time.
/// Pass in null to delete all records in namespace/set regardless of last update time.
/// </param>
public void Truncate(InfoPolicy policy, string ns, string set, DateTime? beforeLastUpdate)
{
	if (policy == null)
	{
		policy = infoPolicyDefault;
	}

	// Send truncate command to one node. That node will distribute the command to other nodes.
	Node node = cluster.GetRandomNode();

	StringBuilder sb = new StringBuilder(200);

	if (set != null)
	{
		sb.Append("truncate:namespace=");
		sb.Append(ns);
		sb.Append(";set=");
		sb.Append(set);
	}
	else
	{
		sb.Append("truncate-namespace:namespace=");
		sb.Append(ns);
	}

	if (beforeLastUpdate.HasValue)
	{
		sb.Append(";lut=");
		// Convert to nanoseconds since unix epoch.
		sb.Append(Util.NanosFromEpoch(beforeLastUpdate.Value));
	}

	string response = Info.Request(policy, node, sb.ToString());

	if (!response.Equals("ok", StringComparison.CurrentCultureIgnoreCase))
	{
		throw new AerospikeException("Truncate failed: " + response);
	}
}
//-------------------------------------------------------
// Touch Operations
//-------------------------------------------------------

/// <summary>
/// Reset record's time to expiration using the policy's expiration.
/// If the record does not exist, it can't be created because the server deletes empty records.
/// Throw an exception if the record does not exist.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if touch fails</exception>
public void Touch(WritePolicy policy, Key key)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	TouchCommand command = new TouchCommand(cluster, policy, key);
	command.Execute();
}

/// <summary>
/// Reset record's time to expiration using the policy's expiration.
/// If the record does not exist, it can't be created because the server deletes empty records.
/// Return true if the record exists and is touched. Return false if the record does not exist.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <returns>true if record was touched, false otherwise</returns>
/// <exception cref="AerospikeException">if touch fails</exception>
public bool Touched(WritePolicy policy, Key key)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	// The extra constructor argument suppresses the not-found exception path.
	TouchCommand command = new(cluster, policy, key, false);
	command.Execute();

	return command.Touched;
}

//-------------------------------------------------------
// Existence-Check Operations
//-------------------------------------------------------

/// <summary>
/// Determine if a record key exists.
/// Return whether record exists or not.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if command fails</exception>
public bool Exists(Policy policy, Key key)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}
	ExistsCommand command = new ExistsCommand(cluster, policy, key);
	command.Execute();
	return command.Exists();
}
/// <summary>
/// Check if multiple record keys exist in one batch call.
/// The returned boolean array is in positional order with the original key array order.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <returns>boolean array which contains results for keys that did complete</returns>
public bool[] Exists(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new bool[0];
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	bool[] existsArray = new bool[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
			BatchExecutor.Execute(command, status);
			return existsArray;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return existsArray;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchExists(existsArray, e);
	}
}
//-------------------------------------------------------
// Read Record Operations
//-------------------------------------------------------

/// <summary>
/// Read entire record for specified key.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}
	ReadCommand command = new ReadCommand(cluster, policy, key);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record header and bins for specified key.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="binNames">bins to retrieve</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key, params string[] binNames)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}
	ReadCommand command = new ReadCommand(cluster, policy, key, binNames);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record generation and expiration only for specified key. Bins are not read.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record GetHeader(Policy policy, Key key)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}
	ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key);
	command.Execute();
	return command.Record;
}
//-------------------------------------------------------
// Batch Read Operations
//-------------------------------------------------------

/// <summary>
/// Read multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins to be requested for each key in the batch.
/// The returned records are located in the same list.
/// If the BatchRead key field is not found, the corresponding record field will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">
/// list of unique record identifiers and the bins to retrieve.
/// The returned records are located in the same list.
/// </param>
/// <returns>true if all batch key requests succeeded</returns>
/// <exception cref="AerospikeException">if read fails</exception>
public bool Get(BatchPolicy policy, List<BatchRead> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	BatchStatus status = new BatchStatus(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchReadListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}

/// <summary>
/// Read multiple records for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <returns>record array which contains results for keys that did complete</returns>
public Record[] Get(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple record headers and bins for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <returns>record array which contains results for keys that did complete</returns>
public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple records for specified keys using read operations in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">array of read operations on record</param>
/// <returns>record array which contains results for keys that did complete</returns>
public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple record header data for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <returns>record array which contains results for keys that did complete</returns>
public Record[] GetHeader(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}
//-------------------------------------------------------
// Join methods
//-------------------------------------------------------

/// <summary>
/// Read specified bins in left record and then join with right records. Each join bin name
/// (Join.leftKeysBinName) must exist in the left record. The join bin must contain a list of
/// keys. Those keys are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, string[] binNames, params Join[] joins)
{
	// Request both the user-specified bins and the join-key bins in a single read.
	string[] names = new string[binNames.Length + joins.Length];
	int count = 0;

	foreach (string binName in binNames)
	{
		names[count++] = binName;
	}

	foreach (Join join in joins)
	{
		names[count++] = join.leftKeysBinName;
	}
	Record record = Get(policy, key, names);
	JoinRecords(policy, record, joins);
	return record;
}

/// <summary>
/// Read all bins in left record and then join with right records. Each join bin name
/// (Join.binNameKeys) must exist in the left record. The join bin must contain a list of
/// keys. Those keys are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, params Join[] joins)
{
	Record record = Get(policy, key);
	JoinRecords(policy, record, joins);
	return record;
}
//-------------------------------------------------------
// Generic Database Operations
//-------------------------------------------------------

/// <summary>
/// Perform multiple read/write operations on a single key in one batch call.
/// An example would be to add an integer value to an existing record and then
/// read the result, all in one database call.
/// <para>
/// The server executes operations in the same order as the operations array.
/// Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
/// MapOperation) can be performed in same call.
/// </para>
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="operations">database operations to perform</param>
/// <exception cref="AerospikeException">if command fails</exception>
public Record Operate(WritePolicy policy, Key key, params Operation[] operations)
{
	OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, operations);
	OperateCommand command = new OperateCommand(cluster, key, args);
	command.Execute();
	return command.Record;
}

//-------------------------------------------------------
// Batch Read/Write Operations
//-------------------------------------------------------

/// <summary>
/// Read/Write multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins for each key in the batch.
/// The returned records are located in the same list.
/// <para>
/// Each record can be <see cref="BatchRead"/>, <see cref="BatchWrite"/>, <see cref="BatchDelete"/>,
/// or <see cref="BatchUDF"/>.
/// </para>
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">list of unique record identifiers and read/write operations</param>
/// <returns>true if all batch sub-commands succeeded</returns>
/// <exception cref="AerospikeException">if command fails</exception>
public bool Operate(BatchPolicy policy, List<BatchRecord> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	if (policy == null)
	{
		policy = batchParentPolicyWriteDefault;
	}

	BatchStatus status = new BatchStatus(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchOperateListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}
/// <summary>
/// Perform read/write operations on multiple keys. If a key is not found, the corresponding result
/// <see cref="BatchRecord.resultCode"/> will be <see cref="ResultCode.KEY_NOT_FOUND_ERROR"/>.
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="writePolicy">write configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">
/// read/write operations to perform. <see cref="Operation.Get()"/> is not allowed because it returns a
/// variable number of bins and makes it difficult (sometimes impossible) to lineup operations with
/// results. Instead, use <see cref="Operation.Get(string)"/> for each bin name.
/// </param>
/// <returns>BatchResults which contains results for keys that did complete</returns>
public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return new BatchResults(new BatchRecord[0], true);
	}

	if (batchPolicy == null)
	{
		batchPolicy = batchParentPolicyWriteDefault;
	}

	if (writePolicy == null)
	{
		writePolicy = batchWritePolicyDefault;
	}

	BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new BatchStatus(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}
//-------------------------------------------------------
// Scan Operations
//-------------------------------------------------------

/// <summary>
/// Read all records in specified namespace and set. If the policy's
/// concurrentNodes is specified, each server node will be read in
/// parallel. Otherwise, server nodes are read in series.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">
/// optional bin to retrieve. All bins will be returned if not specified.
/// </param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, nodes);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// The node is specified by name.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="nodeName">server node name</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">
/// optional bin to retrieve. All bins will be returned if not specified.
/// </param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, string nodeName, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	Node node = cluster.GetNode(nodeName);
	ScanNode(policy, node, ns, setName, callback, binNames);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="node">server node</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">
/// optional bin to retrieve. All bins will be returned if not specified.
/// </param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, Node node, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	PartitionTracker tracker = new PartitionTracker(policy, node);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}
/// <summary>
/// Read records in specified namespace, set and partition filter.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanPartitions(ScanPolicy policy, PartitionFilter partitionFilter, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, nodes, partitionFilter);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

//---------------------------------------------------------------
// User defined functions
//---------------------------------------------------------------

/// <summary>
/// Register package located in a file containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="clientPath">path of client file containing user defined functions, relative to current directory</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, string clientPath, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	string content = Util.ReadFileEncodeBase64(clientPath);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}
Current assembly can be obtained by: Assembly.GetExecutingAssembly() - /// namespace path where Lua resource is located. Example: Aerospike.Client.Resources.mypackage.lua - /// path to store user defined functions on the server, relative to configured script directory. - /// language of user defined functions - /// if register fails - public RegisterTask Register(Policy policy, Assembly resourceAssembly, string resourcePath, string serverPath, Language language) - { - if (policy == null) - { - policy = writePolicyDefault; - } - string content; - using (Stream stream = resourceAssembly.GetManifestResourceStream(resourcePath)) - { - byte[] bytes = new byte[stream.Length]; - stream.Read(bytes, 0, bytes.Length); - content = Convert.ToBase64String(bytes); - } - return RegisterCommand.Register(cluster, policy, content, serverPath, language); - } - - /// - /// Register UDF functions located in a code string with server. Example: - /// - /// String code = @" - /// local function reducer(val1,val2) - /// return val1 + val2 - /// end - /// - /// function sum_single_bin(stream,name) - /// local function mapper(rec) - /// return rec[name] - /// end - /// return stream : map(mapper) : reduce(reducer) - /// end - ///"; - /// - /// client.RegisterUdfString(null, code, "mysum.lua", Language.LUA); - /// - /// - /// This asynchronous server call will return before command is complete. - /// The user can optionally wait for command completion by using the returned - /// RegisterTask instance. - /// - /// - /// generic configuration parameters, pass in null for defaults - /// code string containing user defined functions - /// path to store user defined functions on the server, relative to configured script directory. 
- /// language of user defined functions - /// if register fails - public RegisterTask RegisterUdfString(Policy policy, string code, string serverPath, Language language) - { - if (policy == null) - { - policy = writePolicyDefault; - } - byte[] bytes = ByteUtil.StringToUtf8(code); - string content = Convert.ToBase64String(bytes); - return RegisterCommand.Register(cluster, policy, content, serverPath, language); - } - - /// - /// Remove user defined function from server nodes. - /// - /// info configuration parameters, pass in null for defaults - /// location of UDF on server nodes. Example: mylua.lua - /// if remove fails - public void RemoveUdf(InfoPolicy policy, string serverPath) - { - if (policy == null) - { - policy = infoPolicyDefault; - } - // Send UDF command to one node. That node will distribute the UDF command to other nodes. - string command = "udf-remove:filename=" + serverPath; - Node node = cluster.GetRandomNode(); - string response = Info.Request(policy, node, command); - - if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) - { - return; - } - - if (response.StartsWith("error=file_not_found")) - { - // UDF has already been removed. - return; - } - throw new AerospikeException("Remove UDF failed: " + response); - } - - /// - /// Execute user defined function on server and return results. - /// The function operates on a single record. 
- /// The package name is used to locate the udf file location: - /// - /// udf file = <server udf dir>/<package name>.lua - /// - /// - /// generic configuration parameters, pass in null for defaults - /// unique record identifier - /// server package name where user defined function resides - /// user defined function - /// arguments passed in to user defined function - /// if transaction fails - public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args) - { - if (policy == null) - { - policy = writePolicyDefault; - } - ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, args); - command.Execute(); - - Record record = command.Record; - - if (record == null || record.bins == null) - { - return null; - } - - IDictionary map = record.bins; - object obj; - - if (map.TryGetValue("SUCCESS", out obj)) - { - return obj; - } - - if (map.TryGetValue("FAILURE", out obj)) - { - throw new AerospikeException(obj.ToString()); - } - throw new AerospikeException("Invalid UDF return value"); - } - - /// - /// Execute user defined function on server for each key and return results. 
- /// The package name is used to locate the udf file location: - /// - /// udf file = <server udf dir>/<package name>.lua - /// - /// - /// Requires server version 6.0+ - /// - /// - /// batch configuration parameters, pass in null for defaults - /// udf configuration parameters, pass in null for defaults - /// array of unique record identifiers - /// server package name where user defined function resides - /// user defined function - /// arguments passed in to user defined function - /// which contains results for keys that did complete - public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, Key[] keys, string packageName, string functionName, params Value[] functionArgs) - { - if (keys.Length == 0) - { - return new BatchResults(new BatchRecord[0], true); - } - - if (batchPolicy == null) - { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (udfPolicy == null) - { - udfPolicy = batchUDFPolicyDefault; - } - - byte[] argBytes = Packer.Pack(functionArgs); - - BatchAttr attr = new BatchAttr(); - attr.SetUDF(udfPolicy); - - BatchRecord[] records = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - try - { - BatchStatus status = new BatchStatus(true); - List batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status); - BatchCommand[] commands = new BatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status); - } - - BatchExecutor.Execute(cluster, batchPolicy, commands, status); - return new BatchResults(records, status.GetStatus()); - } - catch (Exception e) - { - // Batch terminated on fatal error. 
- throw new AerospikeException.BatchRecordArray(records, e); - } - } - - //---------------------------------------------------------- - // Query/Execute - //---------------------------------------------------------- - - /// - /// Apply user defined function on records that match the background query statement filter. - /// Records are not returned to the client. - /// This asynchronous server call will return before the command is complete. - /// The user can optionally wait for command completion by using the returned - /// ExecuteTask instance. - /// - /// configuration parameters, pass in null for defaults - /// background query definition - /// server package where user defined function resides - /// function name - /// to pass to function name, if any - /// if command fails - public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - statement.PackageName = packageName; - statement.FunctionName = functionName; - statement.FunctionArgs = functionArgs; - - cluster.AddTran(); - - ulong taskId = statement.PrepareTaskId(); - Node[] nodes = cluster.ValidateNodes(); - Executor executor = new Executor(nodes.Length); - - foreach (Node node in nodes) - { - ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId); - executor.AddCommand(command); - } - - executor.Execute(nodes.Length); - return new ExecuteTask(cluster, policy, statement, taskId); - } - - /// - /// Apply operations on records that match the background query statement filter. - /// Records are not returned to the client. - /// This asynchronous server call will return before the command is complete. - /// The user can optionally wait for command completion by using the returned - /// ExecuteTask instance. 
- /// - /// write configuration parameters, pass in null for defaults - /// background query definition - /// list of operations to be performed on selected records - /// if command fails - public ExecuteTask Execute(WritePolicy policy, Statement statement, params Operation[] operations) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (operations.Length > 0) - { - statement.Operations = operations; - } - - cluster.AddTran(); - - ulong taskId = statement.PrepareTaskId(); - Node[] nodes = cluster.ValidateNodes(); - Executor executor = new Executor(nodes.Length); - - foreach (Node node in nodes) - { - ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId); - executor.AddCommand(command); - } - executor.Execute(nodes.Length); - return new ExecuteTask(cluster, policy, statement, taskId); - } - - //-------------------------------------------------------- - // Query functions - //-------------------------------------------------------- - - /// - /// Execute query and call action for each record returned from server. - /// - /// generic configuration parameters, pass in null for defaults - /// query definition - /// action methods to be called for each record - /// if query fails - public void Query(QueryPolicy policy, Statement statement, Action action) - { - using (RecordSet rs = Query(policy, statement)) - { - while (rs.Next()) - { - action(rs.Key, rs.Record); - } - } - } - - /// - /// Execute query and return record iterator. The query executor puts records on a queue in - /// separate threads. The calling thread concurrently pops records off the queue through the - /// record iterator. 
- /// - /// generic configuration parameters, pass in null for defaults - /// query definition - /// if query fails - public RecordSet Query(QueryPolicy policy, Statement statement) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes); - QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker); - return executor.RecordSet; - } - else - { - QueryRecordExecutor executor = new QueryRecordExecutor(cluster, policy, statement, nodes); - executor.Execute(); - return executor.RecordSet; - } - } - - /// - /// Execute query on all server nodes and return records via the listener. This method will - /// block until the query is complete. Listener callbacks are made within the scope of this call. - /// - /// If is not 1, the supplied listener must handle - /// shared data in a thread-safe manner, because the listener will be called by multiple query - /// threads (one thread per node) in parallel. - /// - /// - /// Requires server version 6.0+ if using a secondary index query. 
- /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// where to send results - /// if query fails - public void Query(QueryPolicy policy, Statement statement, QueryListener listener) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes); - QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker); - } - else - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported"); - } - } - - /// - /// Execute query for specified partitions and return records via the listener. This method will - /// block until the query is complete. Listener callbacks are made within the scope of this call. - /// - /// If is not 1, the supplied listener must handle - /// shared data in a thread-safe manner, because the listener will be called by multiple query - /// threads (one thread per node) in parallel. - /// - /// - /// The completion status of all partitions is stored in the partitionFilter when the query terminates. - /// This partitionFilter can then be used to resume an incomplete query at a later time. - /// This is the preferred method for query terminate/resume functionality. - /// - /// - /// Requires server version 6.0+ if using a secondary index query. - /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// - /// data partition filter. Set to for all partitions. 
- /// - /// where to send results - /// if query fails - public void Query - ( - QueryPolicy policy, - Statement statement, - PartitionFilter partitionFilter, - QueryListener listener - ) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter); - QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker); - } - else - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported"); - } - } - - /// - /// Execute query for specified partitions and return record iterator. The query executor puts - /// records on a queue in separate threads. The calling thread concurrently pops records off - /// the queue through the record iterator. - /// - /// Requires server version 6.0+ if using a secondary index query. - /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// filter on a subset of data partitions - /// if query fails - public RecordSet QueryPartitions - ( - QueryPolicy policy, - Statement statement, - PartitionFilter partitionFilter - ) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter); - QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker); - return executor.RecordSet; - } - else - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "QueryPartitions() not supported"); - } - } - - /// - /// Execute query, apply statement's aggregation function, and return result iterator. 
- /// The aggregation function should be located in a Lua script file that can be found from the - /// "LuaConfig.PackagePath" paths static variable. The default package path is "udf/?.lua" - /// where "?" is the packageName. - /// - /// The query executor puts results on a queue in separate threads. The calling thread - /// concurrently pops results off the queue through the ResultSet iterator. - /// The aggregation function is called on both server and client (final reduce). - /// Therefore, the Lua script file must also reside on both server and client. - /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// server package where user defined function resides - /// aggregation function name - /// arguments to pass to function name, if any - /// if query fails - public ResultSet QueryAggregate - ( - QueryPolicy policy, - Statement statement, - string packageName, - string functionName, - params Value[] functionArgs - ) - { - statement.SetAggregateFunction(packageName, functionName, functionArgs); - return QueryAggregate(policy, statement); - } - - /// - /// Execute query, apply statement's aggregation function, call action for each aggregation - /// object returned from server. - /// - /// query configuration parameters, pass in null for defaults - /// - /// query definition with aggregate functions already initialized by SetAggregateFunction(). - /// - /// action methods to be called for each aggregation object - /// if query fails - public void QueryAggregate(QueryPolicy policy, Statement statement, Action action) - { - using (ResultSet rs = QueryAggregate(policy, statement)) - { - while (rs.Next()) - { - action(rs.Object); - } - } - } - - /// - /// Execute query, apply statement's aggregation function, and return result iterator. - /// The aggregation function should be initialized via the statement's SetAggregateFunction() - /// and should be located in a Lua resource file located in an assembly. 
- /// - /// The query executor puts results on a queue in separate threads. The calling thread - /// concurrently pops results off the queue through the ResultSet iterator. - /// The aggregation function is called on both server and client (final reduce). - /// Therefore, the Lua script file must also reside on both server and client. - /// - /// - /// query configuration parameters, pass in null for defaults - /// - /// query definition with aggregate functions already initialized by SetAggregateFunction(). - /// - /// if query fails - public ResultSet QueryAggregate(QueryPolicy policy, Statement statement) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - QueryAggregateExecutor executor = new QueryAggregateExecutor(cluster, policy, statement, nodes); - executor.Execute(); - return executor.ResultSet; - } - - //-------------------------------------------------------- - // Secondary Index functions - //-------------------------------------------------------- - - /// - /// Create scalar secondary index. - /// This asynchronous server call will return before command is complete. - /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// bin name that data is indexed on - /// underlying data type of secondary index - /// if index create fails - public IndexTask CreateIndex - ( - Policy policy, - string ns, - string setName, - string indexName, - string binName, - IndexType indexType - ) - { - return CreateIndex(policy, ns, setName, indexName, binName, indexType, IndexCollectionType.DEFAULT); - } - - /// - /// Create complex secondary index on bins containing collections. - /// This asynchronous server call will return before command is complete. 
- /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// bin name that data is indexed on - /// underlying data type of secondary index - /// index collection type - /// optional context to index on elements within a CDT - /// if index create fails - public IndexTask CreateIndex - ( - Policy policy, - string ns, - string setName, - string indexName, - string binName, - IndexType indexType, - IndexCollectionType indexCollectionType, - params CTX[] ctx - ) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - StringBuilder sb = new StringBuilder(1024); - sb.Append("sindex-create:ns="); - sb.Append(ns); - - if (setName != null && setName.Length > 0) - { - sb.Append(";set="); - sb.Append(setName); - } - - sb.Append(";indexname="); - sb.Append(indexName); - - if (ctx != null && ctx.Length > 0) - { - byte[] bytes = PackUtil.Pack(ctx); - string base64 = Convert.ToBase64String(bytes); - - sb.Append(";context="); - sb.Append(base64); - } - - if (indexCollectionType != IndexCollectionType.DEFAULT) - { - sb.Append(";indextype="); - sb.Append(indexCollectionType); - } - - sb.Append(";indexdata="); - sb.Append(binName); - sb.Append(","); - sb.Append(indexType); - - // Send index command to one node. That node will distribute the command to other nodes. - String response = SendInfoCommand(policy, sb.ToString()); - - if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) - { - // Return task that could optionally be polled for completion. - return new IndexTask(cluster, policy, ns, indexName, true); - } - - ParseInfoError("Create index failed", response); - return null; - } - - /// - /// Delete secondary index. - /// This asynchronous server call will return before command is complete. 
- /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// if index drop fails - public IndexTask DropIndex(Policy policy, string ns, string setName, string indexName) - { - if (policy == null) - { - policy = writePolicyDefault; - } - StringBuilder sb = new StringBuilder(500); - sb.Append("sindex-delete:ns="); - sb.Append(ns); - - if (setName != null && setName.Length > 0) - { - sb.Append(";set="); - sb.Append(setName); - } - sb.Append(";indexname="); - sb.Append(indexName); - - // Send index command to one node. That node will distribute the command to other nodes. - String response = SendInfoCommand(policy, sb.ToString()); - - if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) - { - return new IndexTask(cluster, policy, ns, indexName, false); - } - - ParseInfoError("Drop index failed", response); - return null; - } - - //----------------------------------------------------------------- - // XDR - Cross datacenter replication - //----------------------------------------------------------------- - - /// - /// Set XDR filter for given datacenter name and namespace. The expression filter indicates - /// which records XDR should ship to the datacenter. - /// - /// info configuration parameters, pass in null for defaults - /// XDR datacenter name - /// namespace - equivalent to database name - /// expression filter - /// if command fails - public void SetXDRFilter(InfoPolicy policy, string datacenter, string ns, Expression filter) - { - if (policy == null) - { - policy = infoPolicyDefault; - } - - // Send XDR command to one node. That node will distribute the XDR command to other nodes. 
- string command = "xdr-set-filter:dc=" + datacenter + ";namespace=" + ns + ";exp=" + filter.GetBase64(); - Node node = cluster.GetRandomNode(); - string response = Info.Request(policy, node, command); - - if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) - { - return; - } - - ParseInfoError("xdr-set-filter failed", response); - } - - //------------------------------------------------------- - // User administration - //------------------------------------------------------- - - /// - /// Create user with password and roles. Clear-text password will be hashed using bcrypt - /// before sending to server. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// user password in clear-text format - /// variable arguments array of role names. Predefined roles are listed in Role.cs - public void CreateUser(AdminPolicy policy, string user, string password, IList roles) - { - string hash = AdminCommand.HashPassword(password); - AdminCommand command = new AdminCommand(); - command.CreateUser(cluster, policy, user, hash, roles); - } - - /// - /// Remove user from cluster. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - public void DropUser(AdminPolicy policy, string user) - { - AdminCommand command = new AdminCommand(); - command.DropUser(cluster, policy, user); - } - - /// - /// Change user's password. 
- /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// user password in clear-text format - public void ChangePassword(AdminPolicy policy, string user, string password) - { - if (cluster.user == null) - { - throw new AerospikeException("Invalid user"); - } - - byte[] userBytes = ByteUtil.StringToUtf8(user); - byte[] passwordBytes = ByteUtil.StringToUtf8(password); - - string hash = AdminCommand.HashPassword(password); - byte[] hashBytes = ByteUtil.StringToUtf8(hash); - - AdminCommand command = new AdminCommand(); - - if (Util.ByteArrayEquals(userBytes, cluster.user)) - { - // Change own password. - command.ChangePassword(cluster, policy, userBytes, hash); - } - else - { - // Change other user's password by user admin. - command.SetPassword(cluster, policy, userBytes, hash); - } - cluster.ChangePassword(userBytes, passwordBytes, hashBytes); - } - - /// - /// Add roles to user's list of roles. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// role names. Predefined roles are listed in Role.cs - public void GrantRoles(AdminPolicy policy, string user, IList roles) - { - AdminCommand command = new AdminCommand(); - command.GrantRoles(cluster, policy, user, roles); - } - - /// - /// Remove roles from user's list of roles. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// role names. Predefined roles are listed in Role.cs - public void RevokeRoles(AdminPolicy policy, string user, IList roles) - { - AdminCommand command = new AdminCommand(); - command.RevokeRoles(cluster, policy, user, roles); - } - - /// - /// Create user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. 
- /// if command fails - public void CreateRole(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges); - } - - /// - /// Create user defined role with optional privileges and whitelist. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// optional list of privileges assigned to role. - /// - /// optional list of allowable IP addresses assigned to role. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// if command fails - public void CreateRole(AdminPolicy policy, string roleName, IList privileges, IList whitelist) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges, whitelist, 0, 0); - } - - /// - /// Create user defined role with optional privileges, whitelist and read/write quotas. - /// Quotas require server security configuration "enable-quotas" to be set to true. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// optional list of privileges assigned to role. - /// - /// optional list of allowable IP addresses assigned to role. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// optional maximum reads per second limit, pass in zero for no limit. - /// optional maximum writes per second limit, pass in zero for no limit. - /// if command fails - public void CreateRole - ( - AdminPolicy policy, - string roleName, - IList privileges, - IList whitelist, - int readQuota, - int writeQuota - ) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges, whitelist, readQuota, writeQuota); - } - - /// - /// Drop user defined role. 
- /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// if command fails - public void DropRole(AdminPolicy policy, string roleName) - { - AdminCommand command = new AdminCommand(); - command.DropRole(cluster, policy, roleName); - } - - /// - /// Grant privileges to an user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. - /// if command fails - public void GrantPrivileges(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.GrantPrivileges(cluster, policy, roleName, privileges); - } - - /// - /// Revoke privileges from an user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. - /// if command fails - public void RevokePrivileges(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.RevokePrivileges(cluster, policy, roleName, privileges); - } - - /// - /// Set IP address whitelist for a role. If whitelist is null or empty, remove existing whitelist from role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// - /// list of allowable IP addresses or null. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// if command fails - public void SetWhitelist(AdminPolicy policy, string roleName, IList whitelist) - { - AdminCommand command = new AdminCommand(); - command.SetWhitelist(cluster, policy, roleName, whitelist); - } - - /// - /// Set maximum reads/writes per second limits for a role. If a quota is zero, the limit is removed. - /// Quotas require server security configuration "enable-quotas" to be set to true. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// maximum reads per second limit, pass in zero for no limit. 
- /// maximum writes per second limit, pass in zero for no limit. - /// if command fails - public void SetQuotas(AdminPolicy policy, string roleName, int readQuota, int writeQuota) - { - AdminCommand command = new AdminCommand(); - command.setQuotas(cluster, policy, roleName, readQuota, writeQuota); - } - - /// - /// Retrieve roles for a given user. - /// - /// admin configuration parameters, pass in null for defaults - /// user name filter - public User QueryUser(AdminPolicy policy, string user) - { - AdminCommand.UserCommand command = new AdminCommand.UserCommand(1); - return command.QueryUser(cluster, policy, user); - } - - /// - /// Retrieve all users and their roles. - /// - /// admin configuration parameters, pass in null for defaults - public List QueryUsers(AdminPolicy policy) - { - AdminCommand.UserCommand command = new AdminCommand.UserCommand(100); - return command.QueryUsers(cluster, policy); - } - - /// - /// Retrieve role definition. - /// - /// admin configuration parameters, pass in null for defaults - /// role name filter - /// if command fails - public Role QueryRole(AdminPolicy policy, string roleName) - { - AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(1); - return command.QueryRole(cluster, policy, roleName); - } - - /// - /// Retrieve all roles. 
- /// - /// admin configuration parameters, pass in null for defaults - /// if command fails - public List QueryRoles(AdminPolicy policy) - { - AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(100); - return command.QueryRoles(cluster, policy); - } - - //------------------------------------------------------- - // Internal Methods - //------------------------------------------------------- - - private string SendInfoCommand(Policy policy, string command) - { - Node node = cluster.GetRandomNode(); - Connection conn = node.GetConnection(policy.socketTimeout); - Info info; - - try - { - info = new Info(conn, command); - node.PutConnection(conn); - } - catch (Exception) - { - node.CloseConnectionOnError(conn); - throw; - } - return info.GetValue(); - } - - private void ParseInfoError(string prefix, string response) - { - Info.Error error = new(response); - int code = (error.Code == 0) ? ResultCode.SERVER_ERROR : error.Code; - - string message = prefix + ": " + response; - throw new AerospikeException(code, message); - } - - private void JoinRecords(BatchPolicy policy, Record record, Join[] joins) - { - if (record == null) - { - return; - } - - foreach (Join join in joins) - { - List keyList = (List)record.GetValue(join.leftKeysBinName); - - if (keyList != null) - { - Key[] keyArray = new Key[keyList.Count]; - int count = 0; - - foreach (object obj in keyList) - { - Value value = Value.Get(obj); - keyArray[count++] = new Key(join.rightNamespace, join.rightSetName, value); - } - - Record[] records; - if (join.rightBinNames == null || join.rightBinNames.Length == 0) - { - records = Get(policy, keyArray); - } - else - { - records = Get(policy, keyArray, join.rightBinNames); - } - record.bins[join.leftKeysBinName] = records; - } - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System.Reflection; +using System.Text; + +namespace Aerospike.Client +{ + /// + /// Instantiate an AerospikeClient object to access an Aerospike + /// database cluster and perform database operations. + /// + /// This client is thread-safe. One client instance should be used per cluster. + /// Multiple threads should share this cluster instance. + /// + /// + /// Your application uses this class API to perform database operations such as + /// writing and reading records, and selecting sets of records. Write operations + /// include specialized functionality such as append/prepend and arithmetic + /// addition. + /// + /// + /// Each record may have multiple bins, unless the Aerospike server nodes are + /// configured as "single-bin". In "multi-bin" mode, partial records may be + /// written or read by specifying the relevant subset of bins. + /// + /// + public class AerospikeClient : IDisposable, IAerospikeClient + { + //------------------------------------------------------- + // Member variables. + //------------------------------------------------------- + + protected internal Cluster cluster; + + /// + /// Default read policy that is used when read command policy is null. + /// + protected Policy readPolicyDefault; + + /// + /// Default write policy that is used when write command policy is null. 
+ /// + protected WritePolicy writePolicyDefault; + + /// + /// Default scan policy that is used when scan command policy is null. + /// + protected ScanPolicy scanPolicyDefault; + + /// + /// Default query policy that is used when query command policy is null. + /// + protected QueryPolicy queryPolicyDefault; + + /// + /// Default parent policy used in batch read commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + protected BatchPolicy batchPolicyDefault; + + /// + /// Default parent policy used in batch write commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + protected BatchPolicy batchParentPolicyWriteDefault; + + /// + /// Default write policy used in batch operate commands. + /// Write policy fields include generation, expiration, durableDelete, etc... + /// + protected BatchWritePolicy batchWritePolicyDefault; + + /// + /// Default delete policy used in batch delete commands. + /// + protected BatchDeletePolicy batchDeletePolicyDefault; + + /// + /// Default user defined function policy used in batch UDF execute commands. + /// + protected BatchUDFPolicy batchUDFPolicyDefault; + + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + protected TxnVerifyPolicy txnVerifyPolicyDefault; + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + protected TxnRollPolicy txnRollPolicyDefault; + + /// + /// Default info policy that is used when info command policy is null. + /// + protected InfoPolicy infoPolicyDefault; + + protected WritePolicy operatePolicyReadDefault; + + //------------------------------------------------------- + // Constructors + //------------------------------------------------------- + + /// + /// Initialize Aerospike client. 
+ /// If the host connection succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// If the connection succeeds, the client is ready to process database requests. + /// If the connection fails, the cluster will remain in a disconnected State + /// until the server is activated. + /// + /// + /// host name + /// host port + /// if host connection fails + public AerospikeClient(string hostname, int port) + : this(new ClientPolicy(), new Host(hostname, port)) + { + } + + /// + /// Initialize Aerospike client. + /// The client policy is used to set defaults and size internal data structures. + /// If the host connection succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// If the connection succeeds, the client is ready to process database requests. + /// If the connection fails and the policy's failOnInvalidHosts is true, a connection + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State + /// until the server is activated. + /// + /// + /// client configuration parameters, pass in null for defaults + /// host name + /// host port + /// if host connection fails + public AerospikeClient(ClientPolicy policy, string hostname, int port) + : this(policy, new Host(hostname, port)) + { + } + + /// + /// Initialize Aerospike client with suitable hosts to seed the cluster map. + /// The client policy is used to set defaults and size internal data structures. + /// For the first host connection that succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// In most cases, only one host is necessary to seed the cluster. 
The remaining hosts + /// are added as future seeds in case of a complete network failure. + /// + /// + /// If one connection succeeds, the client is ready to process database requests. + /// If all connections fail and the policy's failIfNotConnected is true, a connection + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State + /// until the server is activated. + /// + /// + /// client configuration parameters, pass in null for defaults + /// array of potential hosts to seed the cluster + /// if all host connections fail + public AerospikeClient(ClientPolicy policy, params Host[] hosts) + { + if (policy == null) + { + policy = new ClientPolicy(); + } + this.readPolicyDefault = policy.readPolicyDefault; + this.writePolicyDefault = policy.writePolicyDefault; + this.scanPolicyDefault = policy.scanPolicyDefault; + this.queryPolicyDefault = policy.queryPolicyDefault; + this.batchPolicyDefault = policy.batchPolicyDefault; + this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; + this.batchWritePolicyDefault = policy.batchWritePolicyDefault; + this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; + this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; + this.infoPolicyDefault = policy.infoPolicyDefault; + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + + cluster = new Cluster(policy, hosts); + cluster.StartTendThread(policy); + } + + /// + /// Construct client without initialization. + /// Should only be used by classes inheriting from this client. 
+ /// + protected internal AerospikeClient(ClientPolicy policy) + { + if (policy != null) + { + this.readPolicyDefault = policy.readPolicyDefault; + this.writePolicyDefault = policy.writePolicyDefault; + this.scanPolicyDefault = policy.scanPolicyDefault; + this.queryPolicyDefault = policy.queryPolicyDefault; + this.batchPolicyDefault = policy.batchPolicyDefault; + this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; + this.batchWritePolicyDefault = policy.batchWritePolicyDefault; + this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; + this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; + this.infoPolicyDefault = policy.infoPolicyDefault; + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + } + else + { + this.readPolicyDefault = new Policy(); + this.writePolicyDefault = new WritePolicy(); + this.scanPolicyDefault = new ScanPolicy(); + this.queryPolicyDefault = new QueryPolicy(); + this.batchPolicyDefault = new BatchPolicy(); + this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault(); + this.batchWritePolicyDefault = new BatchWritePolicy(); + this.batchDeletePolicyDefault = new BatchDeletePolicy(); + this.batchUDFPolicyDefault = new BatchUDFPolicy(); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(); + this.txnRollPolicyDefault= new TxnRollPolicy(); + this.infoPolicyDefault = new InfoPolicy(); + } + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + } + + //------------------------------------------------------- + // Operations policies + //------------------------------------------------------- + + /// + /// Default read policy that is used when read command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
+ /// + public Policy ReadPolicyDefault + { + get { return readPolicyDefault; } + set { readPolicyDefault = value; } + } + + /// + /// Default write policy that is used when write command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + public WritePolicy WritePolicyDefault + { + get { return writePolicyDefault; } + set { writePolicyDefault = value; } + } + + /// + /// Default scan policy that is used when scan command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + public ScanPolicy ScanPolicyDefault + { + get { return scanPolicyDefault; } + set { scanPolicyDefault = value; } + } + + /// + /// Default query policy that is used when query command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + public QueryPolicy QueryPolicyDefault + { + get { return queryPolicyDefault; } + set { queryPolicyDefault = value; } + } + + /// + /// Default parent policy used in batch read commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + public BatchPolicy BatchPolicyDefault + { + get { return batchPolicyDefault; } + set { batchPolicyDefault = value; } + } + + /// + /// Default parent policy used in batch write commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
+ ///
+ public BatchPolicy BatchParentPolicyWriteDefault
+ {
+ get { return batchParentPolicyWriteDefault; }
+ set { batchParentPolicyWriteDefault = value; }
+ }
+
+ ///
+ /// Default write policy used in batch operate commands.
+ /// Write policy fields include generation, expiration, durableDelete, etc...
+ /// Use when the policy will not be modified. Use
+ /// when the policy will be modified for use in a specific command.
+ ///
+ public BatchWritePolicy BatchWritePolicyDefault
+ {
+ get { return batchWritePolicyDefault; }
+ set { batchWritePolicyDefault = value; }
+ }
+
+ ///
+ /// Default delete policy used in batch delete commands.
+ /// Use when the policy will not be modified. Use
+ /// when the policy will be modified for use in a specific command.
+ ///
+ public BatchDeletePolicy BatchDeletePolicyDefault
+ {
+ get { return batchDeletePolicyDefault; }
+ set { batchDeletePolicyDefault = value; }
+ }
+
+ ///
+ /// Default user defined function policy used in batch UDF execute commands.
+ /// Use when the policy will not be modified. Use
+ /// when the policy will be modified for use in a specific command.
+ ///
+ public BatchUDFPolicy BatchUDFPolicyDefault
+ {
+ get { return batchUDFPolicyDefault; }
+ set { batchUDFPolicyDefault = value; }
+ }
+
+ ///
+ /// Default info policy that is used when info command policy is null.
+ /// Use when the policy will not be modified. Use
+ /// when the policy will be modified for use in a specific command.
+ ///
+ public InfoPolicy InfoPolicyDefault
+ {
+ get { return infoPolicyDefault; }
+ set { infoPolicyDefault = value; }
+ }
+
+ ///
+ /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit.
+ /// Use when the policy will not be modified. Use
+ /// when the policy will be modified for use in a specific command.
+ /// + public TxnVerifyPolicy TxnVerifyPolicyDefault + { + get { return txnVerifyPolicyDefault; } + set { txnVerifyPolicyDefault = value; } + } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + public TxnRollPolicy TxnRollPolicyDefault + { + get { return txnRollPolicyDefault; } + set { txnRollPolicyDefault = value; } + } + + //------------------------------------------------------- + // Cluster Connection Management + //------------------------------------------------------- + + public bool Disposed { get; private set; } + private void Dispose(bool disposing) + { + if (!Disposed) + { + if (disposing) + { + this.Close(); + } + + Disposed = true; + } + } + + /// + /// Close all client connections to database server nodes. + /// + public void Dispose() + { + // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method + Dispose(disposing: true); + GC.SuppressFinalize(this); + } + + /// + /// Close all client connections to database server nodes. + /// + public void Close() + { + cluster.Close(); + } + + /// + /// Return if we are ready to talk to the database server cluster. + /// + public bool Connected + { + get + { + return cluster.Connected; + } + } + + /// + /// Cluster associated with this AerospikeClient instance. + /// + public Cluster Cluster + { + get + { + return cluster; + } + } + + /// + /// Return array of active server nodes in the cluster. + /// + public Node[] Nodes + { + get + { + return cluster.Nodes; + } + } + + /// + /// Enable extended periodic cluster and node latency metrics. + /// + public void EnableMetrics(MetricsPolicy metricsPolicy) + { + cluster.EnableMetrics(metricsPolicy); + } + + /// + /// Disable extended periodic cluster and node latency metrics. 
+ /// + public void DisableMetrics() + { + cluster.DisableMetrics(); + } + + /// + /// Return operating cluster statistics snapshot. + /// + public ClusterStats GetClusterStats() + { + return cluster.GetStats(); + } + + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification. If all nodes return success, the transaction is + /// committed. Otherwise, the transaction is aborted. + /// + /// Requires server version 8.0+ + /// + /// + /// multi-record transaction + /// status of the commit on success + /// if verify commit fails + public CommitStatus.CommitStatusType Commit(Txn txn) + { + TxnRoll tr = new(cluster, txn); + + switch (txn.State) + { + default: + case Txn.TxnState.OPEN: + tr.Verify(txnVerifyPolicyDefault, txnRollPolicyDefault); + return tr.Commit(txnRollPolicyDefault); + + case Txn.TxnState.VERIFIED: + return tr.Commit(txnRollPolicyDefault); + + case Txn.TxnState.COMMITTED: + return CommitStatus.CommitStatusType.ALREADY_COMMITTED; + + case Txn.TxnState.ABORTED: + return CommitStatus.CommitStatusType.ALREADY_ABORTED; + } + } + + /// + /// Abort and rollback the given multi-record transaction. 
+ ///
+ /// Requires server version 8.0+
+ ///
+ ///
+ /// multi-record transaction
+ /// status of the abort
+ public AbortStatus.AbortStatusType Abort(Txn txn)
+ {
+ TxnRoll tr = new(cluster, txn);
+
+ switch (txn.State)
+ {
+ default:
+ case Txn.TxnState.OPEN:
+ case Txn.TxnState.VERIFIED:
+ return tr.Abort(txnRollPolicyDefault);
+
+ case Txn.TxnState.COMMITTED:
+ return AbortStatus.AbortStatusType.ALREADY_COMMITTED;
+
+ case Txn.TxnState.ABORTED:
+ return AbortStatus.AbortStatusType.ALREADY_ABORTED;
+ }
+ }
+
+ //-------------------------------------------------------
+ // Write Record Operations
+ //-------------------------------------------------------
+
+ ///
+ /// Write record bin(s).
+ /// The policy specifies the command timeouts, record expiration and how the command is
+ /// handled when the record already exists.
+ ///
+ /// write configuration parameters, pass in null for defaults
+ /// unique record identifier
+ /// array of bin name/value pairs
+ /// if write fails
+ public void Put(WritePolicy policy, Key key, params Bin[] bins)
+ {
+ if (policy == null)
+ {
+ policy = writePolicyDefault;
+ }
+
+ if (policy.Txn != null)
+ {
+ TxnMonitor.AddKey(cluster, policy, key);
+ }
+
+ WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE);
+ command.Execute();
+ }
+
+ //-------------------------------------------------------
+ // String Operations
+ //-------------------------------------------------------
+
+ ///
+ /// Append bin string values to existing record bin values.
+ /// The policy specifies the command timeout, record expiration and how the command is
+ /// handled when the record already exists.
+ /// This call only works for string values.
+ /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if append fails + public void Append(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND); + command.Execute(); + } + + /// + /// Prepend bin string values to existing record bin values. + /// The policy specifies the command timeout, record expiration and how the command is + /// handled when the record already exists. + /// This call works only for string values. + /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if prepend fails + public void Prepend(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND); + command.Execute(); + } + + //------------------------------------------------------- + // Arithmetic Operations + //------------------------------------------------------- + + /// + /// Add integer/double bin values to existing record bin values. + /// The policy specifies the command timeout, record expiration and how the command is + /// handled when the record already exists. 
+ /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if add fails + public void Add(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD); + command.Execute(); + } + + //------------------------------------------------------- + // Delete Operations + //------------------------------------------------------- + + /// + /// Delete record for specified key. + /// Return whether record existed on server before deletion. + /// The policy specifies the command timeout. + /// + /// delete configuration parameters, pass in null for defaults + /// unique record identifier + /// if delete fails + public bool Delete(WritePolicy policy, Key key) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + DeleteCommand command = new DeleteCommand(cluster, policy, key); + command.Execute(); + return command.Existed(); + } + + /// + /// Delete records for specified keys. If a key is not found, the corresponding result + /// will be . 
+ /// + /// Requires server version 6.0+ + /// + /// + /// batch configuration parameters, pass in null for defaults + /// delete configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys) + { + if (keys.Length == 0) + { + return new BatchResults(new BatchRecord[0], true); + } + + if (batchPolicy == null) + { + batchPolicy = batchParentPolicyWriteDefault; + } + + if (deletePolicy == null) + { + deletePolicy = batchDeletePolicyDefault; + } + + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + + BatchAttr attr = new BatchAttr(); + attr.SetDelete(deletePolicy); + + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + try + { + BatchStatus status = new BatchStatus(true); + List batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, null, records, attr, status); + } + + BatchExecutor.Execute(cluster, batchPolicy, commands, status); + return new BatchResults(records, status.GetStatus()); + } + catch (Exception e) + { + // Batch terminated on fatal error. + throw new AerospikeException.BatchRecordArray(records, e); + } + } + + /// + /// Remove records in specified namespace/set efficiently. This method is many orders of magnitude + /// faster than deleting records one at a time. + /// + /// See https://www.aerospike.com/docs/reference/info#truncate + /// + /// + /// This asynchronous server call may return before the truncation is complete. 
The user can still + /// write new records after the server returns because new records will have last update times + /// greater than the truncate cutoff (set at the time of truncate call). + /// + /// + /// info command configuration parameters, pass in null for defaults + /// required namespace + /// optional set name. Pass in null to delete all sets in namespace. + /// + /// optionally delete records before record last update time. + /// If specified, value must be before the current time. + /// Pass in null to delete all records in namespace/set regardless of last update time. + /// + public void Truncate(InfoPolicy policy, string ns, string set, DateTime? beforeLastUpdate) + { + if (policy == null) + { + policy = infoPolicyDefault; + } + + // Send truncate command to one node. That node will distribute the command to other nodes. + Node node = cluster.GetRandomNode(); + + StringBuilder sb = new StringBuilder(200); + + if (set != null) + { + sb.Append("truncate:namespace="); + sb.Append(ns); + sb.Append(";set="); + sb.Append(set); + } + else + { + sb.Append("truncate-namespace:namespace="); + sb.Append(ns); + } + + if (beforeLastUpdate.HasValue) + { + sb.Append(";lut="); + // Convert to nanoseconds since unix epoch. + sb.Append(Util.NanosFromEpoch(beforeLastUpdate.Value)); + } + + string response = Info.Request(policy, node, sb.ToString()); + + if (!response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) + { + throw new AerospikeException("Truncate failed: " + response); + } + } + + //------------------------------------------------------- + // Touch Operations + //------------------------------------------------------- + + /// + /// Reset record's time to expiration using the policy's expiration. + /// If the record does not exist, it can't be created because the server deletes empty records. + /// Throw an exception if the record does not exist. 
+ ///
+ /// write configuration parameters, pass in null for defaults
+ /// unique record identifier
+ /// if touch fails
+ public void Touch(WritePolicy policy, Key key)
+ {
+ if (policy == null)
+ {
+ policy = writePolicyDefault;
+ }
+
+ if (policy.Txn != null)
+ {
+ TxnMonitor.AddKey(cluster, policy, key);
+ }
+
+ TouchCommand command = new TouchCommand(cluster, policy, key);
+ command.Execute();
+ }
+
+ ///
+ /// Reset record's time to expiration using the policy's expiration.
+ /// If the record does not exist, it can't be created because the server deletes empty records.
+ /// Return true if the record exists and is touched. Return false if the record does not exist.
+ ///
+ /// write configuration parameters, pass in null for defaults
+ /// unique record identifier
+ /// true if record was touched, false otherwise
+ /// if touch fails
+ public bool Touched(WritePolicy policy, Key key)
+ {
+ if (policy == null)
+ {
+ policy = writePolicyDefault;
+ }
+ if (policy.Txn != null)
+ {
+ TxnMonitor.AddKey(cluster, policy, key);
+ }
+
+ TouchCommand command = new(cluster, policy, key, false);
+ command.Execute();
+
+ return command.Touched;
+ }
+
+ //-------------------------------------------------------
+ // Existence-Check Operations
+ //-------------------------------------------------------
+
+ ///
+ /// Determine if a record key exists.
+ /// Return whether record exists or not.
+ /// The policy can be used to specify timeouts.
+ ///
+ /// generic configuration parameters, pass in null for defaults
+ /// unique record identifier
+ /// if command fails
+ public bool Exists(Policy policy, Key key)
+ {
+ if (policy == null)
+ {
+ policy = readPolicyDefault;
+ }
+
+ policy.Txn?.PrepareRead(key.ns);
+
+ ExistsCommand command = new ExistsCommand(cluster, policy, key);
+ command.Execute();
+ return command.Exists();
+ }
+
+ ///
+ /// Check if multiple record keys exist in one batch call.
+ /// The returned boolean array is in positional order with the original key array order. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public bool[] Exists(BatchPolicy policy, Key[] keys) + { + if (keys.Length == 0) + { + return new bool[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + policy.Txn?.PrepareRead(keys); + + bool[] existsArray = new bool[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. + Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status); + BatchExecutor.Execute(command, status); + return existsArray; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return existsArray; + } + catch (Exception e) + { + throw new AerospikeException.BatchExists(existsArray, e); + } + } + + //------------------------------------------------------- + // Read Record Operations + //------------------------------------------------------- + + /// + /// Read entire record for specified key. + /// If found, return record instance. If not found, return null. + /// The policy can be used to specify timeouts. 
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}

	// Enlist this read in the active multi-record transaction, if one is set.
	policy.Txn?.PrepareRead(key.ns);

	ReadCommand command = new ReadCommand(cluster, policy, key);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record header and bins for specified key.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="binNames">bins to retrieve</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key, params string[] binNames)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}

	policy.Txn?.PrepareRead(key.ns);

	ReadCommand command = new ReadCommand(cluster, policy, key, binNames);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record generation and expiration only for specified key. Bins are not read.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record GetHeader(Policy policy, Key key)
{
	if (policy == null)
	{
		policy = readPolicyDefault;
	}

	policy.Txn?.PrepareRead(key.ns);

	ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key);
	command.Execute();
	return command.Record;
}

//-------------------------------------------------------
// Batch Read Operations
//-------------------------------------------------------

/// <summary>
/// Read multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins to be requested for each key in the batch.
/// The returned records are located in the same list.
/// If the BatchRead key field is not found, the corresponding record field will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">list of unique record identifiers and the bins to retrieve.
/// The returned records are located in the same list.</param>
/// <returns>true if all batch key requests succeeded</returns>
/// <exception cref="AerospikeException">if read fails</exception>
public bool Get(BatchPolicy policy, List<BatchRead> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	policy.Txn?.PrepareRead(records);

	BatchStatus status = new BatchStatus(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchReadListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}

/// <summary>
/// Read multiple records for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return Array.Empty<Record>();
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	policy.Txn?.PrepareRead(keys);

	return BatchGetRecords(policy, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
}

/// <summary>
/// Read multiple record headers and bins for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames)
{
	if (keys.Length == 0)
	{
		return Array.Empty<Record>();
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	policy.Txn?.PrepareRead(keys);

	return BatchGetRecords(policy, keys, binNames, null, Command.INFO1_READ, false);
}

/// <summary>
/// Read multiple records for specified keys using read operations in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">array of read operations on record</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return Array.Empty<Record>();
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	policy.Txn?.PrepareRead(keys);

	return BatchGetRecords(policy, keys, null, ops, Command.INFO1_READ, true);
}

/// <summary>
/// Read multiple record header data for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] GetHeader(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return Array.Empty<Record>();
	}

	if (policy == null)
	{
		policy = batchPolicyDefault;
	}

	policy.Txn?.PrepareRead(keys);

	return BatchGetRecords(policy, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
}

/// <summary>
/// Common implementation shared by the batch key-array read variants above.
/// Sends all requests to a single random node when allowProleReads is set;
/// otherwise fans out one command per batch node.
/// </summary>
private Record[] BatchGetRecords(BatchPolicy policy, Key[] keys, string[] binNames, Operation[] ops, int readAttr, bool isOperation)
{
	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new BatchStatus(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new BatchNode(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error. Wrap partial results with the cause.
		throw new AerospikeException.BatchRecords(records, e);
	}
}

//-------------------------------------------------------
// Join methods
//-------------------------------------------------------

/// <summary>
/// Read specified bins in left record and then join with right records. Each join bin name
/// (Join.leftKeysBinName) must exist in the left record. The join bin must contain a list of
/// keys. Those key are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, string[] binNames, params Join[] joins)
{
	// Request the caller's bins plus every join-key bin in one read.
	string[] names = new string[binNames.Length + joins.Length];
	Array.Copy(binNames, names, binNames.Length);

	int count = binNames.Length;

	foreach (Join join in joins)
	{
		names[count++] = join.leftKeysBinName;
	}
	Record record = Get(policy, key, names);
	JoinRecords(policy, record, joins);
	return record;
}

/// <summary>
/// Read all bins in left record and then join with right records. Each join bin name
/// (Join.binNameKeys) must exist in the left record. The join bin must contain a list of
/// keys. Those key are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, params Join[] joins)
{
	Record record = Get(policy, key);
	JoinRecords(policy, record, joins);
	return record;
}

//-------------------------------------------------------
// Generic Database Operations
//-------------------------------------------------------

/// <summary>
/// Perform multiple read/write operations on a single key in one batch call.
/// An example would be to add an integer value to an existing record and then
/// read the result, all in one database call.
/// <para>
/// The server executes operations in the same order as the operations array.
/// Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
/// MapOperation) can be performed in same call.
/// </para>
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="operations">database operations to perform</param>
/// <exception cref="AerospikeException">if command fails</exception>
public Record Operate(WritePolicy policy, Key key, params Operation[] operations)
{
	OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations);

	// args.writePolicy is always resolved to a non-null policy, so no null-conditional
	// access is needed below (the original mixed policy.Txn and policy?.Txn).
	policy = args.writePolicy;

	if (args.hasWrite)
	{
		if (policy.Txn != null)
		{
			TxnMonitor.AddKey(cluster, policy, key);
		}

		OperateCommandWrite command = new(cluster, key, args);
		command.Execute();
		return command.Record;
	}
	else
	{
		policy.Txn?.PrepareRead(key.ns);

		OperateCommandRead command = new(cluster, key, args);
		command.Execute();
		return command.Record;
	}
}

//-------------------------------------------------------
// Batch Read/Write Operations
//-------------------------------------------------------

/// <summary>
/// Read/Write multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins for each key in the batch.
/// The returned records are located in the same list.
/// <para>
/// Each record can be BatchRead, BatchWrite, BatchDelete, or BatchUDF.
/// </para>
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">list of unique record identifiers and read/write operations</param>
/// <returns>true if all batch sub-commands succeeded</returns>
/// <exception cref="AerospikeException">if command fails</exception>
public bool Operate(BatchPolicy policy, List<BatchRecord> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	if (policy == null)
	{
		policy = batchParentPolicyWriteDefault;
	}

	// Writes under a transaction must first be registered with the monitor record.
	if (policy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, policy, records);
	}

	BatchStatus status = new BatchStatus(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchOperateListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}

/// <summary>
/// Perform read/write operations on multiple keys. If a key is not found, the corresponding result
/// will be KEY_NOT_FOUND_ERROR.
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="writePolicy">write configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">
/// read/write operations to perform. Operation.Get() is not allowed because it returns a
/// variable number of bins and makes it difficult (sometimes impossible) to lineup operations with
/// results. Instead, use Operation.Get(binName) for each bin name.
/// </param>
/// <returns>which contains results for keys that did complete</returns>
public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return new BatchResults(Array.Empty<BatchRecord>(), true);
	}

	if (batchPolicy == null)
	{
		batchPolicy = batchParentPolicyWriteDefault;
	}

	if (writePolicy == null)
	{
		writePolicy = batchWritePolicyDefault;
	}

	if (batchPolicy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, batchPolicy, keys);
	}

	BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new BatchStatus(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error. Wrap partial results with the cause.
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}

//-------------------------------------------------------
// Scan Operations
//-------------------------------------------------------

/// <summary>
/// Read all records in specified namespace and set. If the policy's
/// concurrentNodes is specified, each server node will be read in
/// parallel. Otherwise, server nodes are read in series.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	// Track partition progress across every node in the cluster.
	Node[] clusterNodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, clusterNodes);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// The node is specified by name.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="nodeName">server node name</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, string nodeName, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	// Resolve the node by name, then delegate to the node-based overload.
	Node target = cluster.GetNode(nodeName);
	ScanNode(policy, target, ns, setName, callback, binNames);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="node">server node</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, Node node, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	PartitionTracker tracker = new PartitionTracker(policy, node);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read records in specified namespace, set and partition filter.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanPartitions(ScanPolicy policy, PartitionFilter partitionFilter, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	Node[] clusterNodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, clusterNodes, partitionFilter);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

//---------------------------------------------------------------
// User defined functions
//---------------------------------------------------------------

/// <summary>
/// Register package located in a file containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="clientPath">path of client file containing user defined functions, relative to current directory</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, string clientPath, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	string content = Util.ReadFileEncodeBase64(clientPath);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register package located in a resource containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="resourceAssembly">assembly where resource is located. Current assembly can be obtained by: Assembly.GetExecutingAssembly()</param>
/// <param name="resourcePath">namespace path where Lua resource is located. Example: Aerospike.Client.Resources.mypackage.lua</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, Assembly resourceAssembly, string resourcePath, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	string content;
	using (Stream stream = resourceAssembly.GetManifestResourceStream(resourcePath))
	{
		if (stream == null)
		{
			throw new AerospikeException("Resource not found: " + resourcePath);
		}

		byte[] bytes = new byte[stream.Length];
		int offset = 0;

		// Stream.Read may return fewer bytes than requested; loop until the
		// buffer is full (the original ignored the return value, risking a
		// truncated UDF package on partial reads).
		while (offset < bytes.Length)
		{
			int read = stream.Read(bytes, offset, bytes.Length - offset);

			if (read <= 0)
			{
				throw new EndOfStreamException("Unexpected end of resource: " + resourcePath);
			}
			offset += read;
		}
		content = Convert.ToBase64String(bytes);
	}
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register UDF functions located in a code string with server. Example:
/// <code>
/// String code = @"
/// local function reducer(val1,val2)
///     return val1 + val2
/// end
///
/// function sum_single_bin(stream,name)
///     local function mapper(rec)
///         return rec[name]
///     end
///     return stream : map(mapper) : reduce(reducer)
/// end
/// ";
///
/// client.RegisterUdfString(null, code, "mysum.lua", Language.LUA);
/// </code>
/// <para>
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </para>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="code">code string containing user defined functions</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask RegisterUdfString(Policy policy, string code, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	byte[] bytes = ByteUtil.StringToUtf8(code);
	string content = Convert.ToBase64String(bytes);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Remove user defined function from server nodes.
/// </summary>
/// <param name="policy">info configuration parameters, pass in null for defaults</param>
/// <param name="serverPath">location of UDF on server nodes. Example: mylua.lua</param>
/// <exception cref="AerospikeException">if remove fails</exception>
public void RemoveUdf(InfoPolicy policy, string serverPath)
{
	if (policy == null)
	{
		policy = infoPolicyDefault;
	}
	// Send UDF command to one node. That node will distribute the UDF command to other nodes.
	string command = "udf-remove:filename=" + serverPath;
	Node node = cluster.GetRandomNode();
	string response = Info.Request(policy, node, command);

	// Protocol responses are machine-generated ASCII; compare ordinally, not
	// with the current culture (the original used CurrentCultureIgnoreCase).
	if (response.Equals("ok", StringComparison.OrdinalIgnoreCase))
	{
		return;
	}

	if (response.StartsWith("error=file_not_found", StringComparison.Ordinal))
	{
		// UDF has already been removed.
		return;
	}
	throw new AerospikeException("Remove UDF failed: " + response);
}

/// <summary>
/// Execute user defined function on server and return results.
/// The function operates on a single record.
/// The package name is used to locate the udf file location:
/// <para>
/// udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua
/// </para>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="args">arguments passed in to user defined function</param>
/// <exception cref="AerospikeException">if command fails</exception>
public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	if (policy.Txn != null)
	{
		TxnMonitor.AddKey(cluster, policy, key);
	}

	ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, args);
	command.Execute();

	Record record = command.Record;

	if (record == null || record.bins == null)
	{
		return null;
	}

	// The server returns the UDF result under "SUCCESS" or the error under "FAILURE".
	IDictionary<string, object> map = record.bins;

	if (map.TryGetValue("SUCCESS", out object obj))
	{
		return obj;
	}

	if (map.TryGetValue("FAILURE", out obj))
	{
		throw new AerospikeException(obj.ToString());
	}
	throw new AerospikeException("Invalid UDF return value");
}

/// <summary>
/// Execute user defined function on server for each key and return results.
/// The package name is used to locate the udf file location:
/// <para>
/// udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua
/// </para>
/// <para>
/// Requires server version 6.0+
/// </para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="udfPolicy">udf configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="functionArgs">arguments passed in to user defined function</param>
/// <returns>which contains results for keys that did complete</returns>
public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, Key[] keys, string packageName, string functionName, params Value[] functionArgs)
{
	if (keys.Length == 0)
	{
		return new BatchResults(Array.Empty<BatchRecord>(), true);
	}

	if (batchPolicy == null)
	{
		batchPolicy = batchParentPolicyWriteDefault;
	}

	if (udfPolicy == null)
	{
		udfPolicy = batchUDFPolicyDefault;
	}

	if (batchPolicy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, batchPolicy, keys);
	}

	byte[] argBytes = Packer.Pack(functionArgs);

	BatchAttr attr = new BatchAttr();
	attr.SetUDF(udfPolicy);

	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new BatchStatus(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error.
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}

//----------------------------------------------------------
// Query/Execute
//----------------------------------------------------------

/// <summary>
/// Apply user defined function on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="packageName">server package where user defined function resides</param>
/// <param name="functionName">function name</param>
/// <param name="functionArgs">to pass to function name, if any</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	statement.PackageName = packageName;
	statement.FunctionName = functionName;
	statement.FunctionArgs = functionArgs;

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new Executor(nodes.Length);

	foreach (Node node in nodes)
	{
		ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId);
		executor.AddCommand(command);
	}

	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}

/// <summary>
/// Apply operations on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="operations">list of operations to be performed on selected records</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, params Operation[] operations)
{
	policy ??= writePolicyDefault;

	if (operations.Length > 0)
	{
		statement.Operations = operations;
	}

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new Executor(nodes.Length);

	// Queue one background command per node, then run them all.
	foreach (Node node in nodes)
	{
		executor.AddCommand(new ServerCommand(cluster, node, policy, statement, taskId));
	}
	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}

//--------------------------------------------------------
// Query functions
//--------------------------------------------------------

/// <summary>
/// Execute query and call action for each record returned from server.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="action">action methods to be called for each record</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, Action<Key, Record> action)
{
	using (RecordSet rs = Query(policy, statement))
	{
		while (rs.Next())
		{
			action(rs.Key, rs.Record);
		}
	}
}

/// <summary>
/// Execute query and return record iterator. The query executor puts records on a queue in
/// separate threads. The calling thread concurrently pops records off the queue through the
/// record iterator.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <exception cref="AerospikeException">if query fails</exception>
public RecordSet Query(QueryPolicy policy, Statement statement)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	// Partition queries are used when supported by the server or when no
	// secondary-index filter is present; otherwise fall back to record queries.
	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
		QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
		return executor.RecordSet;
	}

	QueryRecordExecutor recordExecutor = new QueryRecordExecutor(cluster, policy, statement, nodes);
	recordExecutor.Execute();
	return recordExecutor.RecordSet;
}

/// <summary>
/// Execute query on all server nodes and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If maxConcurrentNodes is not 1, the supplied listener must handle
/// shared data in a thread-safe manner, because the listener will be called by multiple query
/// threads (one thread per node) in parallel.
/// </para>
/// <para>
/// Requires server version 6.0+ if using a secondary index query.
/// </para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="listener">where to send results</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, QueryListener listener)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	// Listener-based queries require partition-query support (or no filter).
	if (!cluster.hasPartitionQuery && statement.filter != null)
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
	}

	PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
	QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
}

/// <summary>
/// Execute query for specified partitions and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If maxConcurrentNodes is not 1, the supplied listener must handle
/// shared data in a thread-safe manner, because the listener will be called by multiple query
/// threads (one thread per node) in parallel.
/// </para>
/// <para>
/// The completion status of all partitions is stored in the partitionFilter when the query terminates.
/// This partitionFilter can then be used to resume an incomplete query at a later time.
/// This is the preferred method for query terminate/resume functionality.
/// </para>
/// <para>
/// Requires server version 6.0+ if using a secondary index query.
/// </para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="partitionFilter">data partition filter. Set to PartitionFilter.All() for all partitions.</param>
/// <param name="listener">where to send results</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query
(
	QueryPolicy policy,
	Statement statement,
	PartitionFilter partitionFilter,
	QueryListener listener
)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	if (!cluster.hasPartitionQuery && statement.filter != null)
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
	}

	PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
	QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
}

/// <summary>
/// Execute query for specified partitions and return record iterator. The query executor puts
/// records on a queue in separate threads. The calling thread concurrently pops records off
/// the queue through the record iterator.
/// <para>
/// Requires server version 6.0+ if using a secondary index query.
/// </para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <exception cref="AerospikeException">if query fails</exception>
public RecordSet QueryPartitions
(
	QueryPolicy policy,
	Statement statement,
	PartitionFilter partitionFilter
)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	if (!cluster.hasPartitionQuery && statement.filter != null)
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "QueryPartitions() not supported");
	}

	PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
	QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
	return executor.RecordSet;
}

/// <summary>
/// Execute query, apply statement's aggregation function, and return result iterator.
+ /// The aggregation function should be located in a Lua script file that can be found from the + /// "LuaConfig.PackagePath" paths static variable. The default package path is "udf/?.lua" + /// where "?" is the packageName. + /// + /// The query executor puts results on a queue in separate threads. The calling thread + /// concurrently pops results off the queue through the ResultSet iterator. + /// The aggregation function is called on both server and client (final reduce). + /// Therefore, the Lua script file must also reside on both server and client. + /// + /// + /// query configuration parameters, pass in null for defaults + /// query definition + /// server package where user defined function resides + /// aggregation function name + /// arguments to pass to function name, if any + /// if query fails + public ResultSet QueryAggregate + ( + QueryPolicy policy, + Statement statement, + string packageName, + string functionName, + params Value[] functionArgs + ) + { + statement.SetAggregateFunction(packageName, functionName, functionArgs); + return QueryAggregate(policy, statement); + } + + /// + /// Execute query, apply statement's aggregation function, call action for each aggregation + /// object returned from server. + /// + /// query configuration parameters, pass in null for defaults + /// + /// query definition with aggregate functions already initialized by SetAggregateFunction(). + /// + /// action methods to be called for each aggregation object + /// if query fails + public void QueryAggregate(QueryPolicy policy, Statement statement, Action action) + { + using (ResultSet rs = QueryAggregate(policy, statement)) + { + while (rs.Next()) + { + action(rs.Object); + } + } + } + + /// + /// Execute query, apply statement's aggregation function, and return result iterator. + /// The aggregation function should be initialized via the statement's SetAggregateFunction() + /// and should be located in a Lua resource file located in an assembly. 
+ /// + /// The query executor puts results on a queue in separate threads. The calling thread + /// concurrently pops results off the queue through the ResultSet iterator. + /// The aggregation function is called on both server and client (final reduce). + /// Therefore, the Lua script file must also reside on both server and client. + /// + /// + /// query configuration parameters, pass in null for defaults + /// + /// query definition with aggregate functions already initialized by SetAggregateFunction(). + /// + /// if query fails + public ResultSet QueryAggregate(QueryPolicy policy, Statement statement) + { + if (policy == null) + { + policy = queryPolicyDefault; + } + + Node[] nodes = cluster.ValidateNodes(); + QueryAggregateExecutor executor = new QueryAggregateExecutor(cluster, policy, statement, nodes); + executor.Execute(); + return executor.ResultSet; + } + + //-------------------------------------------------------- + // Secondary Index functions + //-------------------------------------------------------- + + /// + /// Create scalar secondary index. + /// This asynchronous server call will return before command is complete. + /// The user can optionally wait for command completion by using the returned + /// IndexTask instance. + /// + /// generic configuration parameters, pass in null for defaults + /// namespace - equivalent to database name + /// optional set name - equivalent to database table + /// name of secondary index + /// bin name that data is indexed on + /// underlying data type of secondary index + /// if index create fails + public IndexTask CreateIndex + ( + Policy policy, + string ns, + string setName, + string indexName, + string binName, + IndexType indexType + ) + { + return CreateIndex(policy, ns, setName, indexName, binName, indexType, IndexCollectionType.DEFAULT); + } + + /// + /// Create complex secondary index on bins containing collections. + /// This asynchronous server call will return before command is complete. 
+ /// The user can optionally wait for command completion by using the returned + /// IndexTask instance. + /// + /// generic configuration parameters, pass in null for defaults + /// namespace - equivalent to database name + /// optional set name - equivalent to database table + /// name of secondary index + /// bin name that data is indexed on + /// underlying data type of secondary index + /// index collection type + /// optional context to index on elements within a CDT + /// if index create fails + public IndexTask CreateIndex + ( + Policy policy, + string ns, + string setName, + string indexName, + string binName, + IndexType indexType, + IndexCollectionType indexCollectionType, + params CTX[] ctx + ) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + StringBuilder sb = new StringBuilder(1024); + sb.Append("sindex-create:ns="); + sb.Append(ns); + + if (setName != null && setName.Length > 0) + { + sb.Append(";set="); + sb.Append(setName); + } + + sb.Append(";indexname="); + sb.Append(indexName); + + if (ctx != null && ctx.Length > 0) + { + byte[] bytes = PackUtil.Pack(ctx); + string base64 = Convert.ToBase64String(bytes); + + sb.Append(";context="); + sb.Append(base64); + } + + if (indexCollectionType != IndexCollectionType.DEFAULT) + { + sb.Append(";indextype="); + sb.Append(indexCollectionType); + } + + sb.Append(";indexdata="); + sb.Append(binName); + sb.Append(','); + sb.Append(indexType); + + // Send index command to one node. That node will distribute the command to other nodes. + String response = SendInfoCommand(policy, sb.ToString()); + + if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) + { + // Return task that could optionally be polled for completion. + return new IndexTask(cluster, policy, ns, indexName, true); + } + + ParseInfoError("Create index failed", response); + return null; + } + + /// + /// Delete secondary index. + /// This asynchronous server call will return before command is complete. 
+ /// The user can optionally wait for command completion by using the returned + /// IndexTask instance. + /// + /// generic configuration parameters, pass in null for defaults + /// namespace - equivalent to database name + /// optional set name - equivalent to database table + /// name of secondary index + /// if index drop fails + public IndexTask DropIndex(Policy policy, string ns, string setName, string indexName) + { + if (policy == null) + { + policy = writePolicyDefault; + } + StringBuilder sb = new StringBuilder(500); + sb.Append("sindex-delete:ns="); + sb.Append(ns); + + if (setName != null && setName.Length > 0) + { + sb.Append(";set="); + sb.Append(setName); + } + sb.Append(";indexname="); + sb.Append(indexName); + + // Send index command to one node. That node will distribute the command to other nodes. + String response = SendInfoCommand(policy, sb.ToString()); + + if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) + { + return new IndexTask(cluster, policy, ns, indexName, false); + } + + ParseInfoError("Drop index failed", response); + return null; + } + + //----------------------------------------------------------------- + // XDR - Cross datacenter replication + //----------------------------------------------------------------- + + /// + /// Set XDR filter for given datacenter name and namespace. The expression filter indicates + /// which records XDR should ship to the datacenter. + /// + /// info configuration parameters, pass in null for defaults + /// XDR datacenter name + /// namespace - equivalent to database name + /// expression filter + /// if command fails + public void SetXDRFilter(InfoPolicy policy, string datacenter, string ns, Expression filter) + { + if (policy == null) + { + policy = infoPolicyDefault; + } + + // Send XDR command to one node. That node will distribute the XDR command to other nodes. 
+ string command = "xdr-set-filter:dc=" + datacenter + ";namespace=" + ns + ";exp=" + filter.GetBase64(); + Node node = cluster.GetRandomNode(); + string response = Info.Request(policy, node, command); + + if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) + { + return; + } + + ParseInfoError("xdr-set-filter failed", response); + } + + //------------------------------------------------------- + // User administration + //------------------------------------------------------- + + /// + /// Create user with password and roles. Clear-text password will be hashed using bcrypt + /// before sending to server. + /// + /// admin configuration parameters, pass in null for defaults + /// user name + /// user password in clear-text format + /// variable arguments array of role names. Predefined roles are listed in Role.cs + public void CreateUser(AdminPolicy policy, string user, string password, IList roles) + { + string hash = AdminCommand.HashPassword(password); + AdminCommand command = new AdminCommand(); + command.CreateUser(cluster, policy, user, hash, roles); + } + + /// + /// Remove user from cluster. + /// + /// admin configuration parameters, pass in null for defaults + /// user name + public void DropUser(AdminPolicy policy, string user) + { + AdminCommand command = new AdminCommand(); + command.DropUser(cluster, policy, user); + } + + /// + /// Change user's password. 
+ /// + /// admin configuration parameters, pass in null for defaults + /// user name + /// user password in clear-text format + public void ChangePassword(AdminPolicy policy, string user, string password) + { + if (cluster.user == null) + { + throw new AerospikeException("Invalid user"); + } + + byte[] userBytes = ByteUtil.StringToUtf8(user); + byte[] passwordBytes = ByteUtil.StringToUtf8(password); + + string hash = AdminCommand.HashPassword(password); + byte[] hashBytes = ByteUtil.StringToUtf8(hash); + + AdminCommand command = new AdminCommand(); + + if (Util.ByteArrayEquals(userBytes, cluster.user)) + { + // Change own password. + command.ChangePassword(cluster, policy, userBytes, hash); + } + else + { + // Change other user's password by user admin. + command.SetPassword(cluster, policy, userBytes, hash); + } + cluster.ChangePassword(userBytes, passwordBytes, hashBytes); + } + + /// + /// Add roles to user's list of roles. + /// + /// admin configuration parameters, pass in null for defaults + /// user name + /// role names. Predefined roles are listed in Role.cs + public void GrantRoles(AdminPolicy policy, string user, IList roles) + { + AdminCommand command = new AdminCommand(); + command.GrantRoles(cluster, policy, user, roles); + } + + /// + /// Remove roles from user's list of roles. + /// + /// admin configuration parameters, pass in null for defaults + /// user name + /// role names. Predefined roles are listed in Role.cs + public void RevokeRoles(AdminPolicy policy, string user, IList roles) + { + AdminCommand command = new AdminCommand(); + command.RevokeRoles(cluster, policy, user, roles); + } + + /// + /// Create user defined role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// privileges assigned to the role. 
+ /// if command fails + public void CreateRole(AdminPolicy policy, string roleName, IList privileges) + { + AdminCommand command = new AdminCommand(); + command.CreateRole(cluster, policy, roleName, privileges); + } + + /// + /// Create user defined role with optional privileges and whitelist. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// optional list of privileges assigned to role. + /// + /// optional list of allowable IP addresses assigned to role. + /// IP addresses can contain wildcards (ie. 10.1.2.0/24). + /// + /// if command fails + public void CreateRole(AdminPolicy policy, string roleName, IList privileges, IList whitelist) + { + AdminCommand command = new AdminCommand(); + command.CreateRole(cluster, policy, roleName, privileges, whitelist, 0, 0); + } + + /// + /// Create user defined role with optional privileges, whitelist and read/write quotas. + /// Quotas require server security configuration "enable-quotas" to be set to true. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// optional list of privileges assigned to role. + /// + /// optional list of allowable IP addresses assigned to role. + /// IP addresses can contain wildcards (ie. 10.1.2.0/24). + /// + /// optional maximum reads per second limit, pass in zero for no limit. + /// optional maximum writes per second limit, pass in zero for no limit. + /// if command fails + public void CreateRole + ( + AdminPolicy policy, + string roleName, + IList privileges, + IList whitelist, + int readQuota, + int writeQuota + ) + { + AdminCommand command = new AdminCommand(); + command.CreateRole(cluster, policy, roleName, privileges, whitelist, readQuota, writeQuota); + } + + /// + /// Drop user defined role. 
+ /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// if command fails + public void DropRole(AdminPolicy policy, string roleName) + { + AdminCommand command = new AdminCommand(); + command.DropRole(cluster, policy, roleName); + } + + /// + /// Grant privileges to an user defined role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// privileges assigned to the role. + /// if command fails + public void GrantPrivileges(AdminPolicy policy, string roleName, IList privileges) + { + AdminCommand command = new AdminCommand(); + command.GrantPrivileges(cluster, policy, roleName, privileges); + } + + /// + /// Revoke privileges from an user defined role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// privileges assigned to the role. + /// if command fails + public void RevokePrivileges(AdminPolicy policy, string roleName, IList privileges) + { + AdminCommand command = new AdminCommand(); + command.RevokePrivileges(cluster, policy, roleName, privileges); + } + + /// + /// Set IP address whitelist for a role. If whitelist is null or empty, remove existing whitelist from role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// + /// list of allowable IP addresses or null. + /// IP addresses can contain wildcards (ie. 10.1.2.0/24). + /// + /// if command fails + public void SetWhitelist(AdminPolicy policy, string roleName, IList whitelist) + { + AdminCommand command = new AdminCommand(); + command.SetWhitelist(cluster, policy, roleName, whitelist); + } + + /// + /// Set maximum reads/writes per second limits for a role. If a quota is zero, the limit is removed. + /// Quotas require server security configuration "enable-quotas" to be set to true. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// maximum reads per second limit, pass in zero for no limit. 
+ /// maximum writes per second limit, pass in zero for no limit. + /// if command fails + public void SetQuotas(AdminPolicy policy, string roleName, int readQuota, int writeQuota) + { + AdminCommand command = new AdminCommand(); + command.setQuotas(cluster, policy, roleName, readQuota, writeQuota); + } + + /// + /// Retrieve roles for a given user. + /// + /// admin configuration parameters, pass in null for defaults + /// user name filter + public User QueryUser(AdminPolicy policy, string user) + { + AdminCommand.UserCommand command = new AdminCommand.UserCommand(1); + return command.QueryUser(cluster, policy, user); + } + + /// + /// Retrieve all users and their roles. + /// + /// admin configuration parameters, pass in null for defaults + public List QueryUsers(AdminPolicy policy) + { + AdminCommand.UserCommand command = new AdminCommand.UserCommand(100); + return command.QueryUsers(cluster, policy); + } + + /// + /// Retrieve role definition. + /// + /// admin configuration parameters, pass in null for defaults + /// role name filter + /// if command fails + public Role QueryRole(AdminPolicy policy, string roleName) + { + AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(1); + return command.QueryRole(cluster, policy, roleName); + } + + /// + /// Retrieve all roles. 
+ /// + /// admin configuration parameters, pass in null for defaults + /// if command fails + public List QueryRoles(AdminPolicy policy) + { + AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(100); + return command.QueryRoles(cluster, policy); + } + + //------------------------------------------------------- + // Internal Methods + //------------------------------------------------------- + + private string SendInfoCommand(Policy policy, string command) + { + Node node = cluster.GetRandomNode(); + Connection conn = node.GetConnection(policy.socketTimeout); + Info info; + + try + { + info = new Info(conn, command); + node.PutConnection(conn); + } + catch (Exception) + { + node.CloseConnectionOnError(conn); + throw; + } + return info.GetValue(); + } + + private void ParseInfoError(string prefix, string response) + { + Info.Error error = new(response); + int code = (error.Code == 0) ? ResultCode.SERVER_ERROR : error.Code; + + string message = prefix + ": " + response; + throw new AerospikeException(code, message); + } + + private void JoinRecords(BatchPolicy policy, Record record, Join[] joins) + { + if (record == null) + { + return; + } + + foreach (Join join in joins) + { + List keyList = (List)record.GetValue(join.leftKeysBinName); + + if (keyList != null) + { + Key[] keyArray = new Key[keyList.Count]; + int count = 0; + + foreach (object obj in keyList) + { + Value value = Value.Get(obj); + keyArray[count++] = new Key(join.rightNamespace, join.rightSetName, value); + } + + Record[] records; + if (join.rightBinNames == null || join.rightBinNames.Length == 0) + { + records = Get(policy, keyArray); + } + else + { + records = Get(policy, keyArray, join.rightBinNames); + } + record.bins[join.leftKeysBinName] = records; + } + } + } + } +} diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index dd46d42c..0e883c55 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ 
b/AerospikeClient/Main/AerospikeException.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -14,7 +14,9 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System.Collections.ObjectModel; using System.Text; +using static Aerospike.Client.CommitError; namespace Aerospike.Client { @@ -41,6 +43,12 @@ public AerospikeException(int resultCode, Exception e) this.resultCode = resultCode; } + public AerospikeException(int resultCode, string message) + : base(message) + { + this.resultCode = resultCode; + } + public AerospikeException(int resultCode) : base("") { @@ -76,7 +84,7 @@ public override string Message { get { - StringBuilder sb = new StringBuilder(512); + StringBuilder sb = new(512); sb.Append("Error "); sb.Append(resultCode); @@ -159,7 +167,7 @@ public bool ShouldSerializeNode() } /// - /// Transaction policy. + /// Command policy. /// public Policy Policy { @@ -200,7 +208,7 @@ public int Iteration } /// - /// Is it possible that write transaction may have completed. + /// Is it possible that write command may have completed. /// public bool InDoubt { @@ -211,7 +219,7 @@ public bool InDoubt } /// - /// Set whether it is possible that the write transaction may have completed + /// Set whether it is possible that the write command may have completed /// even though this exception was generated. This may be the case when a /// client error occurs (like timeout) after the command was sent to the server. 
/// @@ -299,7 +307,7 @@ public override string Message return "Client timeout: " + totalTimeout; } - StringBuilder sb = new StringBuilder(512); + StringBuilder sb = new(512); if (client) { @@ -487,6 +495,12 @@ public BatchRecordArray(BatchRecord[] records, Exception e) { this.records = records; } + + public BatchRecordArray(BatchRecord[] records, string message, Exception e) + : base(ResultCode.BATCH_FAILED, message, e) + { + this.records = records; + } } /// @@ -560,5 +574,116 @@ public Backoff(int resultCode) : base(resultCode) { } } + + /// + /// Exception thrown when a multi-record transaction commit fails. + /// Commit Exception has similar behavior to AggregateException. + /// might be populated if multiple exceptions contribute to the failure. + /// + public sealed class Commit : AerospikeException + { + /// + /// Error status of the attempted commit. + /// + public readonly CommitErrorType Error; + + /// + /// Verify result for each read key in the MRT. May be null if failure occurred before verify. + /// + public readonly BatchRecord[] VerifyRecords; + + /// + /// Roll forward/backward result for each write key in the MRT. May be null if failure occurred before + /// roll forward/backward. + /// + public readonly BatchRecord[] RollRecords; + + private readonly Exception[] _innerExceptions; // Complete set of exceptions. 
+ + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error)) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + _innerExceptions = Array.Empty(); + } + + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception innerException) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error), innerException) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + _innerExceptions = new[] { innerException }; + } + + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception[] innerExceptions) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error), innerExceptions[0]) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + _innerExceptions = innerExceptions; + } + + /// + /// Get Commit message with records. + /// + public override string Message + { + get + { + StringBuilder sb = new(1024); + RecordsToString(sb, "verify errors:", VerifyRecords); + RecordsToString(sb, "roll errors:", RollRecords); + return BaseMessage + sb.ToString(); + } + } + + /// + /// Gets a read-only collection of the instances that caused the + /// current exception. + /// + public ReadOnlyCollection InnerExceptions => new ReadOnlyCollection(_innerExceptions); + } + + private static void RecordsToString(StringBuilder sb, String title, BatchRecord[] records) + { + if (records == null) + { + return; + } + + int count = 0; + + foreach (BatchRecord br in records) { + // Only show results with an error response. + if (!(br.resultCode == ResultCode.OK || br.resultCode == ResultCode.NO_RESPONSE)) + { + // Only show first 3 errors. 
+ if (count >= 3) + { + sb.Append(System.Environment.NewLine); + sb.Append("..."); + break; + } + + if (count == 0) + { + sb.Append(System.Environment.NewLine); + sb.Append(title); + } + + sb.Append(System.Environment.NewLine); + sb.Append(br.key); + sb.Append(','); + sb.Append(br.resultCode); + sb.Append(','); + sb.Append(br.inDoubt); + count++; + } + } + } } } diff --git a/AerospikeClient/Main/BatchRecord.cs b/AerospikeClient/Main/BatchRecord.cs index cc59de11..3d8a31d5 100644 --- a/AerospikeClient/Main/BatchRecord.cs +++ b/AerospikeClient/Main/BatchRecord.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -39,7 +39,7 @@ public class BatchRecord public int resultCode; /// - /// Is it possible that the write transaction may have completed even though an error + /// Is it possible that the write command may have completed even though an error /// occurred for this record. This may be the case when a client error occurs (like timeout) /// after the command was sent to the server. /// diff --git a/AerospikeClient/Main/CommitError.cs b/AerospikeClient/Main/CommitError.cs new file mode 100644 index 00000000..8fc6fb8b --- /dev/null +++ b/AerospikeClient/Main/CommitError.cs @@ -0,0 +1,45 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) error status. + /// + public static class CommitError + { + public enum CommitErrorType + { + VERIFY_FAIL, + VERIFY_FAIL_CLOSE_ABANDONED, + VERIFY_FAIL_ABORT_ABANDONED, + MARK_ROLL_FORWARD_ABANDONED + } + + public static string CommitErrorToString(CommitErrorType type) + { + return type switch + { + CommitErrorType.VERIFY_FAIL => "MRT verify failed. MRT aborted.", + CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED => "MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT.", + CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED => "MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT.", + CommitErrorType.MARK_ROLL_FORWARD_ABANDONED => "MRT client mark roll forward abandoned. Server will eventually abort the MRT.", + _ => "Unexpected CommitErrorType" + }; + } + } +} diff --git a/AerospikeClient/Main/CommitStatus.cs b/AerospikeClient/Main/CommitStatus.cs new file mode 100644 index 00000000..50c9a7d2 --- /dev/null +++ b/AerospikeClient/Main/CommitStatus.cs @@ -0,0 +1,49 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) commit status code. + /// + public static class CommitStatus + { + public enum CommitStatusType + { + OK, + ALREADY_COMMITTED, + ALREADY_ABORTED, + ROLL_FORWARD_ABANDONED, + CLOSE_ABANDONED + } + + public static string CommitErrorToString(CommitStatusType status) + { + return status switch + { + CommitStatusType.OK => "Commit succeeded.", + CommitStatusType.ALREADY_COMMITTED => "Already committed.", + CommitStatusType.ALREADY_ABORTED => "Already aborted.", + CommitStatusType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", + CommitStatusType.CLOSE_ABANDONED => "MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected AbortStatusType." + }; + } + } +} diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index e941a20e..2dff73b1 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -14,7 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Diagnostics.Metrics; using System.Reflection; namespace Aerospike.Client @@ -27,57 +26,92 @@ public interface IAerospikeClient /// /// Default read policy that is used when read command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// Policy ReadPolicyDefault { get; set; } /// /// Default write policy that is used when write command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// WritePolicy WritePolicyDefault { get; set; } /// /// Default scan policy that is used when scan command policy is null. + /// Use when the policy will not be modified. 
Use + /// when the policy will be modified for use in a specific command. /// ScanPolicy ScanPolicyDefault { get; set; } /// /// Default query policy that is used when query command policy is null. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// QueryPolicy QueryPolicyDefault { get; set; } /// /// Default parent policy used in batch read commands.Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchPolicy BatchPolicyDefault { get; set; } /// /// Default parent policy used in batch write commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchPolicy BatchParentPolicyWriteDefault { get; set; } /// /// Default write policy used in batch operate commands. /// Write policy fields include generation, expiration, durableDelete, etc... + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchWritePolicy BatchWritePolicyDefault { get; set; } /// /// Default delete policy used in batch delete commands. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchDeletePolicy BatchDeletePolicyDefault { get; set; } /// - /// Default user defined function policy used in batch UDF excecute commands. + /// Default user defined function policy used in batch UDF execute commands. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchUDFPolicy BatchUDFPolicyDefault { get; set; } /// /// Default info policy that is used when info command policy is null. 
+ /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// InfoPolicy InfoPolicyDefault { get; set; } + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + TxnVerifyPolicy TxnVerifyPolicyDefault { get; set; } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. + /// + TxnRollPolicy TxnRollPolicyDefault { get; set; } + //------------------------------------------------------- // Cluster Connection Management //------------------------------------------------------- @@ -118,13 +152,40 @@ public interface IAerospikeClient /// ClusterStats GetClusterStats(); + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification. If all nodes return success, the transaction is + /// committed. Otherwise, the transaction is aborted. + /// + /// Requires server version 8.0+ + /// + /// + /// multi-record transaction + /// status of the commit on success + /// if verify commit fails + CommitStatus.CommitStatusType Commit(Txn txn); + + /// + /// Abort and rollback the given multi-record transaction. 
+ /// + /// Requires server version 8.0+ + /// + /// + /// multi-record transaction + /// status of the abort + AbortStatus.AbortStatusType Abort(Txn txn); + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- /// /// Write record bin(s). - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// write configuration parameters, pass in null for defaults @@ -139,7 +200,7 @@ public interface IAerospikeClient /// /// Append bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -151,7 +212,7 @@ public interface IAerospikeClient /// /// Prepend bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -167,7 +228,7 @@ public interface IAerospikeClient /// /// Add integer bin values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for integer values. /// @@ -184,7 +245,7 @@ public interface IAerospikeClient /// /// Delete record for specified key. /// Return whether record existed on server before deletion. 
- /// The policy specifies the transaction timeout. + /// The policy specifies the command timeout. /// /// delete configuration parameters, pass in null for defaults /// unique record identifier @@ -607,7 +668,7 @@ public interface IAerospikeClient /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args); /// diff --git a/AerospikeClient/Main/Key.cs b/AerospikeClient/Main/Key.cs index 16f05bed..944079fc 100644 --- a/AerospikeClient/Main/Key.cs +++ b/AerospikeClient/Main/Key.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2018 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -14,7 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Linq; namespace Aerospike.Client { @@ -370,6 +369,7 @@ public Key(string ns, byte[] digest, string setName, Value userKey) this.ns = ns; this.digest = digest; this.setName = setName; + // Do not try to validate userKey type because it is most likely null. this.userKey = userKey; } @@ -378,11 +378,9 @@ public Key(string ns, byte[] digest, string setName, Value userKey) /// public override int GetHashCode() { - int result = 1; - foreach (byte element in digest) - { - result = 31 * result + element; - } + // The digest is already a hash, so pick 4 bytes from the 20 byte digest at a + // random offset (in this case 8). 
+ int result = ByteUtil.LittleBytesToInt(digest, 8) + 31; return 31 * result + ns.GetHashCode(); } diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index 92c0ddc9..9e701fb6 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -14,6 +14,8 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System.Transactions; + namespace Aerospike.Client { /// @@ -21,6 +23,12 @@ namespace Aerospike.Client /// public sealed class ResultCode { + /// + /// Multi-record transaction failed. + /// Value: -17 + /// + public const int TXN_FAILED = -17; + /// /// One or more keys failed in a batch. /// Value: -16 @@ -265,7 +273,7 @@ public sealed class ResultCode public const int OP_NOT_APPLICABLE = 26; /// - /// The transaction was not performed because the filter was false. + /// The command was not performed because the filter was false. /// Value: 27 /// public const int FILTERED_OUT = 27; @@ -432,6 +440,43 @@ public sealed class ResultCode /// public const int UDF_BAD_RESPONSE = 100; + /// + /// MRT record blocked by a different transaction. + /// Value: 120 + /// + public const int MRT_BLOCKED = 120; + + /// + /// MRT read version mismatch identified during commit. + /// Some other command changed the record outside of the transaction. + /// Value: 121 + /// + public const int MRT_VERSION_MISMATCH = 121; + + /// + /// MRT deadline reached without a successful commit or abort. + /// Value: 122 + /// + public const int MRT_EXPIRED = 122; + + /// + /// MRT write command limit (4096) exceeded. + /// Value: 123 + /// + public const int MRT_TOO_MANY_WRITES = 123; + + /// + /// MRT was already committed. + /// Value: 124 + /// + public const int MRT_COMMITTED = 124; + + /// + /// MRT was already aborted. + /// Value: 125 + /// + public const int MRT_ABORTED = 125; + /// /// Batch functionality has been disabled. 
/// Value: 150 @@ -548,6 +593,9 @@ public static string GetResultString(int resultCode) { switch (resultCode) { + case TXN_FAILED: + return "Multi-record transaction failed"; + case BATCH_FAILED: return "One or more keys failed in a batch"; @@ -669,10 +717,10 @@ public static string GetResultString(int resultCode) return "Operation not applicable"; case FILTERED_OUT: - return "Transaction filtered out"; + return "Command filtered out"; case LOST_CONFLICT: - return "Transaction failed due to conflict with XDR"; + return "Command failed due to conflict with XDR"; case XDR_KEY_BUSY: return "Write can't complete until XDR finishes shipping."; @@ -696,7 +744,7 @@ public static string GetResultString(int resultCode) return "Invalid field"; case ILLEGAL_STATE: - return "Illegal state"; + return "Illegal state"; case INVALID_USER: return "Invalid user"; @@ -752,6 +800,24 @@ public static string GetResultString(int resultCode) case UDF_BAD_RESPONSE: return "UDF returned error"; + case MRT_BLOCKED: + return "MRT record blocked by a different transaction"; + + case MRT_VERSION_MISMATCH: + return "MRT version mismatch"; + + case MRT_EXPIRED: + return "MRT expired"; + + case MRT_TOO_MANY_WRITES: + return "MRT write command limit exceeded"; + + case MRT_COMMITTED: + return "MRT already committed"; + + case MRT_ABORTED: + return "MRT already aborted"; + case BATCH_DISABLED: return "Batch functionality has been disabled"; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs new file mode 100644 index 00000000..c2faea39 --- /dev/null +++ b/AerospikeClient/Main/Txn.cs @@ -0,0 +1,303 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT). Each command in the MRT must use the same namespace. + /// + public class Txn + { + /// + /// MRT State. + /// + public enum TxnState + { + OPEN, + VERIFIED, + COMMITTED, + ABORTED + } + + private static long randomState = DateTime.UtcNow.Ticks; + + public long Id { get; private set; } + public ConcurrentHashMap Reads { get; private set; } + public ConcurrentHashSet Writes { get; private set; } + public TxnState State { get; internal set; } + + /// + /// MRT namespace. + /// + public string Ns { get; private set; } + + /// + /// MRT deadline. The deadline is a wall clock time calculated by the server from the + /// MRT timeout that is sent by the client when creating the MRT monitor record. This deadline + /// is used to avoid client/server clock skew issues. For internal use only. + /// + internal int Deadline { get; set; } + + /// + /// MRT timeout in seconds. The timer starts when the MRT monitor record is created. + /// This occurs when the first command in the MRT is executed. If the timeout is reached before + /// a commit or abort is called, the server will expire and rollback the MRT. + /// + /// If the MRT timeout is zero, the server configuration mrt-duration is used. + /// The default mrt-duration is 10 seconds. + /// + /// + public int Timeout { get; set; } + + private bool writeInDoubt; + + public bool InDoubt { get; internal set; } + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with + /// default capacities. 
+ /// + /// The default client MRT timeout is zero. This means use the server configuration mrt-duration + /// as the MRT timeout. The default mrt-duration is 10 seconds. + /// + /// + public Txn() + { + Id = CreateId(); + Reads = new ConcurrentHashMap(); + Writes = new ConcurrentHashSet(); + State = TxnState.OPEN; + } + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with + /// given capacities. + /// + /// The default client MRT timeout is zero. This means use the server configuration mrt-duration + /// as the MRT timeout. The default mrt-duration is 10 seconds. + /// + /// + /// expected number of record reads in the MRT. Minimum value is 16. + /// expected number of record writes in the MRT. Minimum value is 16. + public Txn(int readsCapacity, int writesCapacity) + { + if (readsCapacity < 16) + { + readsCapacity = 16; // Enforce the documented minimum capacity of 16. + } + + if (writesCapacity < 16) + { + writesCapacity = 16; + } + + Id = CreateId(); + Reads = new ConcurrentHashMap(readsCapacity); + Writes = new ConcurrentHashSet(writesCapacity); + State = TxnState.OPEN; + } + + [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] + private static long UnsignedRightShift(long n, int s) => n >= 0 ? n >> s : (n >> s) + (2L << ~s); + + private static long CreateId() + { + long oldState, newState, interlockedResult; + do + { + oldState = Interlocked.Read(ref randomState); + newState = oldState; + newState ^= UnsignedRightShift(newState, 12); + newState ^= newState << 25; + newState ^= UnsignedRightShift(newState, 27); + interlockedResult = Interlocked.CompareExchange(ref randomState, newState, oldState); + } while (oldState != interlockedResult); + return newState * 0x2545f4914f6cdd1dL; + } + + /// + /// Verify current MRT state and namespace for a future read command. 
+ /// + /// + internal void PrepareRead(string ns) + { + VerifyCommand(); + SetNamespace(ns); + } + + /// + /// Verify current MRT state and namespaces for a future batch read command. + /// + /// + internal void PrepareRead(Key[] keys) + { + VerifyCommand(); + SetNamespace(keys); + } + + /// + /// Verify current MRT state and namespaces for a future batch read command. + /// + /// + internal void PrepareRead(List records) + { + VerifyCommand(); + SetNamespace(records); + } + + /// + /// Verify that the MRT state allows future commands. + /// + /// + public void VerifyCommand() + { + if (State != TxnState.OPEN) + { + throw new AerospikeException("Command not allowed in current MRT state: " + State); + } + } + + /// + /// Process the results of a record read. For internal use only. + /// + /// + /// + internal void OnRead(Key key, long? version) + { + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + } + + /// + /// Get record version for a given key. + /// + /// + /// + public long? GetReadVersion(Key key) + { + if (Reads.ContainsKey(key)) + { + return Reads[key]; + } + else + { + return null; + } + } + + /// + /// Process the results of a record write. For internal use only. + /// + /// + /// + /// + internal void OnWrite(Key key, long? version, int resultCode) + { + // Write commands set namespace prior to sending the command, so there is + // no need to call it here when receiving the response. + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + else + { + if (resultCode == ResultCode.OK) + { + Reads.Remove(key); + Writes.Add(key); + } + } + } + + /// + /// Add key to write hash when write command is in doubt (usually caused by timeout). + /// + internal void OnWriteInDoubt(Key key) + { + writeInDoubt = true; + Reads.Remove(key); + Writes.Add(key); + } + + /// + /// Set MRT namespace only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. 
+ /// + internal void SetNamespace(string ns) + { + if (Ns == null) + { + Ns = ns; + } + else if (!Ns.Equals(ns)) { + throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " + + Ns + " new: " + ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + internal void SetNamespace(Key[] keys) + { + foreach (Key key in keys) + { + SetNamespace(key.ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + internal void SetNamespace(List records) + { + foreach (BatchRead br in records) + { + SetNamespace(br.key.ns); + } + } + + /// + /// Return if the MRT monitor record should be closed/deleted + /// + /// + internal bool CloseMonitor() + { + return Deadline != 0 && !writeInDoubt; + } + + /// + /// Does MRT monitor record exist. + /// + public bool MonitorExists() + { + return Deadline != 0; + } + + public void Clear() + { + Ns = null; + Deadline = 0; + Reads.Clear(); + Writes.Clear(); + } + } +} diff --git a/AerospikeClient/Metrics/LatencyBuckets.cs b/AerospikeClient/Metrics/LatencyBuckets.cs index 537127d8..0eb909d4 100644 --- a/AerospikeClient/Metrics/LatencyBuckets.cs +++ b/AerospikeClient/Metrics/LatencyBuckets.cs @@ -18,7 +18,7 @@ namespace Aerospike.Client { /// - /// Latency buckets for a transaction group (See ). + /// Latency buckets for a command group (See ). /// Latency bucket counts are cumulative and not reset on each metrics snapshot interval. 
/// public sealed class LatencyBuckets diff --git a/AerospikeClient/Metrics/MetricsWriter.cs b/AerospikeClient/Metrics/MetricsWriter.cs index e3b54d20..7d94989a 100644 --- a/AerospikeClient/Metrics/MetricsWriter.cs +++ b/AerospikeClient/Metrics/MetricsWriter.cs @@ -148,7 +148,7 @@ private void Open() sb.Append(now.ToString(timestampFormat)); sb.Append(" header(1)"); - sb.Append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,tranCount,retryCount,delayQueueTimeoutCount,asyncThreadsInUse,asyncCompletionPortsInUse,node[]]"); + sb.Append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,commandCount,retryCount,delayQueueTimeoutCount,asyncThreadsInUse,asyncCompletionPortsInUse,node[]]"); sb.Append(" node[name,address,port,syncConn,asyncConn,errors,timeouts,latency[]]"); sb.Append(" conn[inUse,inPool,opened,closed]"); sb.Append(" latency("); @@ -180,7 +180,7 @@ private void WriteCluster(Cluster cluster) sb.Append(','); sb.Append(cluster.InvalidNodeCount); // Cumulative. Not reset on each interval. sb.Append(','); - sb.Append(cluster.GetTranCount()); // Cumulative. Not reset on each interval. + sb.Append(cluster.GetCommandCount()); // Cumulative. Not reset on each interval. sb.Append(','); sb.Append(cluster.GetRetryCount()); // Cumulative. Not reset on each interval. sb.Append(','); diff --git a/AerospikeClient/Policy/AdminPolicy.cs b/AerospikeClient/Policy/AdminPolicy.cs index 8e455649..a66724f5 100644 --- a/AerospikeClient/Policy/AdminPolicy.cs +++ b/AerospikeClient/Policy/AdminPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -41,5 +41,14 @@ public AdminPolicy(AdminPolicy other) public AdminPolicy() { } + + /// + /// Creates a deep copy of this admin policy. 
+ /// + /// + public AdminPolicy Clone() + { + return new AdminPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchDeletePolicy.cs b/AerospikeClient/Policy/BatchDeletePolicy.cs index 6425a1d9..c477eec6 100644 --- a/AerospikeClient/Policy/BatchDeletePolicy.cs +++ b/AerospikeClient/Policy/BatchDeletePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -37,7 +37,7 @@ public sealed class BatchDeletePolicy public Expression filterExp; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// @@ -64,7 +64,7 @@ public sealed class BatchDeletePolicy public int generation; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. /// @@ -101,5 +101,14 @@ public BatchDeletePolicy(BatchDeletePolicy other) public BatchDeletePolicy() { } + + /// + /// Creates a deep copy of this batch delete policy. + /// + /// + public BatchDeletePolicy Clone() + { + return new BatchDeletePolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchPolicy.cs b/AerospikeClient/Policy/BatchPolicy.cs index 46b54fb7..0ffe8270 100644 --- a/AerospikeClient/Policy/BatchPolicy.cs +++ b/AerospikeClient/Policy/BatchPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
under one or more contributor * license agreements. @@ -23,7 +23,7 @@ namespace Aerospike.Client /// /// Batch parent policy. /// - public sealed class BatchPolicy : Policy + public class BatchPolicy : Policy { /// /// Maximum number of concurrent synchronous batch node request threads to server nodes. @@ -44,7 +44,7 @@ public sealed class BatchPolicy : Policy ///
    ///
  • /// 1 (default): Issue batch node requests sequentially. This mode has a performance advantage - /// for small batch sizes because requests can be issued in the main transaction thread without + /// for small batch sizes because requests can be issued in the main command thread without /// using a thread pool. This mode is not optimal for batch requests spread out over many nodes /// in a large cluster. ///
  • @@ -71,7 +71,7 @@ public sealed class BatchPolicy : Policy /// Allow batch to be processed immediately in the server's receiving thread for in-memory /// namespaces. If false, the batch will always be processed in separate service threads. /// - /// For batch transactions with smaller sized records (<= 1K per record), inline + /// For batch commands with smaller sized records (<= 1K per record), inline /// processing will be significantly faster on in-memory namespaces. /// /// @@ -189,6 +189,15 @@ public static BatchPolicy WriteDefault() policy.maxRetries = 0; return policy; } + + /// + /// Creates a deep copy of this batch policy. + /// + /// + public new BatchPolicy Clone() + { + return new BatchPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchReadPolicy.cs b/AerospikeClient/Policy/BatchReadPolicy.cs index b6a5f9cf..3a9a9a52 100644 --- a/AerospikeClient/Policy/BatchReadPolicy.cs +++ b/AerospikeClient/Policy/BatchReadPolicy.cs @@ -93,5 +93,14 @@ public BatchReadPolicy(BatchReadPolicy other) public BatchReadPolicy() { } + + /// + /// Creates a deep copy of this batch read policy. + /// + /// + public BatchReadPolicy Clone() + { + return new BatchReadPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchUDFPolicy.cs b/AerospikeClient/Policy/BatchUDFPolicy.cs index 35f79358..b6cb9945 100644 --- a/AerospikeClient/Policy/BatchUDFPolicy.cs +++ b/AerospikeClient/Policy/BatchUDFPolicy.cs @@ -37,7 +37,7 @@ public sealed class BatchUDFPolicy public Expression filterExp; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. 
/// @@ -63,7 +63,7 @@ public sealed class BatchUDFPolicy public int expiration; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. /// @@ -99,5 +99,14 @@ public BatchUDFPolicy(BatchUDFPolicy other) public BatchUDFPolicy() { } + + /// + /// Creates a deep copy of this batch UDF policy. + /// + /// + public BatchUDFPolicy Clone() + { + return new BatchUDFPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchWritePolicy.cs b/AerospikeClient/Policy/BatchWritePolicy.cs index e4d77e6e..28d3dbb8 100644 --- a/AerospikeClient/Policy/BatchWritePolicy.cs +++ b/AerospikeClient/Policy/BatchWritePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -45,7 +45,7 @@ public sealed class BatchWritePolicy public RecordExistsAction recordExistsAction = RecordExistsAction.UPDATE; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// @@ -88,7 +88,7 @@ public sealed class BatchWritePolicy public int expiration; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. 
/// @@ -127,5 +127,14 @@ public BatchWritePolicy(BatchWritePolicy other) public BatchWritePolicy() { } + + /// + /// Creates a deep copy of this batch write policy. + /// + /// + public BatchWritePolicy Clone() + { + return new BatchWritePolicy(this); + } } } diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index 9947761f..a9d9af38 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -14,6 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System; using System.Collections.Generic; namespace Aerospike.Client @@ -79,7 +80,7 @@ public class ClientPolicy public int minConnsPerNode; /// - /// Maximum number of synchronous connections allowed per server node. Transactions will go + /// Maximum number of synchronous connections allowed per server node. Commands will go /// through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum /// number of connections would be exceeded. /// @@ -120,7 +121,7 @@ public class ClientPolicy /// /// /// If server's proto-fd-idle-ms is zero (no reap), then maxSocketIdle should also be zero. - /// Connections retrieved from a pool in transactions will not be checked for maxSocketIdle + /// Connections retrieved from a pool in commands will not be checked for maxSocketIdle /// when maxSocketIdle is zero. Idle connections will still be trimmed down from peak /// connections to min connections (minConnsPerNode and asyncMinConnsPerNode) using a /// hard-coded 55 second limit in the cluster tend thread. @@ -147,7 +148,7 @@ public class ClientPolicy /// /// The number of cluster tend iterations that defines the window for . 
/// One tend iteration is defined as plus the time to tend all nodes. - /// At the end of the window, the error count is reset to zero and backoff state is removed + /// At the end of the window, the error count is reset to zero and backoff State is removed /// on all nodes. /// /// Default: 1 @@ -225,7 +226,18 @@ public class ClientPolicy /// Default user defined function policy used in batch UDF excecute commands. /// public BatchUDFPolicy batchUDFPolicyDefault = new BatchUDFPolicy(); - + + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public TxnVerifyPolicy txnVerifyPolicyDefault = new TxnVerifyPolicy(); + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public TxnRollPolicy txnRollPolicyDefault = new TxnRollPolicy(); + /// /// Default info policy that is used when info command's policy is null. /// @@ -263,6 +275,15 @@ public class ClientPolicy /// public bool useServicesAlternate; + /// For testing purposes only. Do not modify. + /// + /// Should the AerospikeClient instance communicate with the first seed node only + /// instead of using the data partition map to determine which node to send the + /// database command. + /// + /// Default: false + public bool forceSingleNode = false; + /// /// Track server rack data. This field is useful when directing read commands to the server node /// that contains the key and exists on the same rack as the client. 
This serves to lower cloud @@ -326,10 +347,13 @@ public ClientPolicy(ClientPolicy other) this.batchWritePolicyDefault = new BatchWritePolicy(other.batchWritePolicyDefault); this.batchDeletePolicyDefault = new BatchDeletePolicy(other.batchDeletePolicyDefault); this.batchUDFPolicyDefault = new BatchUDFPolicy(other.batchUDFPolicyDefault); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(other.txnVerifyPolicyDefault); + this.txnRollPolicyDefault = new TxnRollPolicy(other.txnRollPolicyDefault); this.infoPolicyDefault = new InfoPolicy(other.infoPolicyDefault); this.tlsPolicy = (other.tlsPolicy != null) ? new TlsPolicy(other.tlsPolicy) : null; this.ipMap = other.ipMap; this.useServicesAlternate = other.useServicesAlternate; + this.forceSingleNode = other.forceSingleNode; this.rackAware = other.rackAware; this.rackId = other.rackId; this.rackIds = (other.rackIds != null) ? new List(other.rackIds) : null; @@ -341,5 +365,14 @@ public ClientPolicy(ClientPolicy other) public ClientPolicy() { } + + /// + /// Creates a deep copy of this client policy. + /// + /// + public ClientPolicy Clone() + { + return new ClientPolicy(this); + } } } diff --git a/AerospikeClient/Policy/CommitLevel.cs b/AerospikeClient/Policy/CommitLevel.cs index 1be4761f..9cbbb11d 100644 --- a/AerospikeClient/Policy/CommitLevel.cs +++ b/AerospikeClient/Policy/CommitLevel.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2018 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -17,7 +17,7 @@ namespace Aerospike.Client { /// - /// Desired consistency guarantee when committing a transaction on the server. + /// Desired consistency guarantee when committing a command on the server. 
/// public enum CommitLevel { diff --git a/AerospikeClient/Policy/InfoPolicy.cs b/AerospikeClient/Policy/InfoPolicy.cs index a3852522..2e7c3a35 100644 --- a/AerospikeClient/Policy/InfoPolicy.cs +++ b/AerospikeClient/Policy/InfoPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -50,5 +50,14 @@ public InfoPolicy() { timeout = 1000; } + + /// + /// Creates a deep copy of this info policy. + /// + /// + public InfoPolicy Clone() + { + return new InfoPolicy(this); + } } } diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index 4fbe4f93..524f4756 100644 --- a/AerospikeClient/Policy/Policy.cs +++ b/AerospikeClient/Policy/Policy.cs @@ -21,10 +21,19 @@ namespace Aerospike.Client { /// - /// Transaction policy attributes used in all database commands. + /// Command policy attributes used in all database commands. /// public class Policy { + /// + /// Multi-record transaction identifier (MRT). If this field is populated, the corresponding + /// command will be included in the MRT. This field is ignored for scan/query. + /// + /// Default: null + /// + /// + public Txn Txn { get; set; } + /// /// Read policy for AP (availability) namespaces. /// @@ -52,7 +61,7 @@ public class Policy /// /// Optional expression filter. If filterExp exists and evaluates to false, the - /// transaction is ignored. + /// command is ignored. /// /// Default: null /// @@ -76,7 +85,7 @@ public class Policy /// /// If socketTimeout is not zero and the socket has been idle for at least socketTimeout, /// both maxRetries and totalTimeout are checked. If maxRetries and totalTimeout are not - /// exceeded, the transaction is retried. + /// exceeded, the command is retried. /// /// /// For synchronous methods, socketTimeout is the socket SendTimeout and ReceiveTimeout. 
@@ -90,15 +99,15 @@ public class Policy public int socketTimeout = 30000; /// - /// Total transaction timeout in milliseconds. + /// Total command timeout in milliseconds. /// /// The totalTimeout is tracked on the client and sent to the server along with - /// the transaction in the wire protocol. The client will most likely timeout - /// first, but the server also has the capability to timeout the transaction. + /// the command in the wire protocol. The client will most likely timeout + /// first, but the server also has the capability to timeout the command. /// /// - /// If totalTimeout is not zero and totalTimeout is reached before the transaction - /// completes, the transaction will abort with + /// If totalTimeout is not zero and totalTimeout is reached before the command + /// completes, the command will abort with /// . /// /// @@ -111,11 +120,11 @@ public class Policy /// /// Delay milliseconds after socket read timeout in an attempt to recover the socket - /// in the background. Processing continues on the original transaction and the user - /// is still notified at the original transaction timeout. + /// in the background. Processing continues on the original command and the user + /// is still notified at the original command timeout. /// - /// When a transaction is stopped prematurely, the socket must be drained of all incoming - /// data or closed to prevent unread socket data from corrupting the next transaction + /// When a command is stopped prematurely, the socket must be drained of all incoming + /// data or closed to prevent unread socket data from corrupting the next command /// that would use that socket. /// /// @@ -136,7 +145,7 @@ public class Policy /// /// /// The disadvantage of enabling timeoutDelay is that extra memory/processing is required - /// to drain sockets and additional connections may still be needed for transaction retries. + /// to drain sockets and additional connections may still be needed for command retries. 
/// /// /// If timeoutDelay were to be enabled, 3000ms would be a reasonable value. @@ -148,16 +157,16 @@ public class Policy public int TimeoutDelay = 0; /// - /// Maximum number of retries before aborting the current transaction. + /// Maximum number of retries before aborting the current command. /// The initial attempt is not counted as a retry. /// - /// If maxRetries is exceeded, the transaction will abort with + /// If maxRetries is exceeded, the command will abort with /// . /// /// /// WARNING: Database writes that are not idempotent (such as Add()) /// should not be retried because the write operation may be performed - /// multiple times if the client timed out previous transaction attempts. + /// multiple times if the client timed out previous command attempts. /// It's important to use a distinct WritePolicy for non-idempotent /// writes which sets maxRetries = 0; /// @@ -201,7 +210,7 @@ public class Policy /// Determine how record TTL (time to live) is affected on reads. When enabled, the server can /// efficiently operate as a read-based LRU cache where the least recently used records are expired. /// The value is expressed as a percentage of the TTL sent on the most recent write such that a read - /// within this interval of the record’s end of life will generate a touch. + /// within this interval of the record's end of life will generate a touch. /// /// For example, if the most recent write had a TTL of 10 hours and read_touch_ttl_percent is set to /// 80, the next read within 8 hours of the record's end of life (equivalent to 2 hours after the most @@ -251,7 +260,7 @@ public class Policy /// /// Throw exception if is defined and that filter evaluates - /// to false (transaction ignored). The + /// to false (command ignored). The /// will contain result code . /// /// This field is not applicable to batch, scan or query commands. 
@@ -275,6 +284,7 @@ public class Policy /// public Policy(Policy other) { + this.Txn = other.Txn; this.readModeAP = other.readModeAP; this.readModeSC = other.readModeSC; this.replica = other.replica; @@ -296,6 +306,7 @@ public Policy(Policy other) /// public Policy() { + Txn = null; } /// @@ -323,6 +334,15 @@ public void SetTimeouts(int socketTimeout, int totalTimeout) this.socketTimeout = totalTimeout; } } + + /// + /// Creates a deep copy of this policy. + /// + /// + public Policy Clone() + { + return new Policy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/QueryPolicy.cs b/AerospikeClient/Policy/QueryPolicy.cs index 49f39f6f..dbf20500 100644 --- a/AerospikeClient/Policy/QueryPolicy.cs +++ b/AerospikeClient/Policy/QueryPolicy.cs @@ -21,6 +21,10 @@ namespace Aerospike.Client { /// /// Container object for policy attributes used in query operations. + /// + /// Inherited Policy fields and + /// are ignored. + /// /// public class QueryPolicy : Policy { @@ -76,7 +80,7 @@ public class QueryPolicy : Policy public bool includeBinData = true; /// - /// Terminate query if cluster is in migration state. If the server supports partition + /// Terminate query if cluster is in migration State. If the server supports partition /// queries or the query filter is null (scan), this field is ignored. /// Default: false /// @@ -142,6 +146,15 @@ public QueryPolicy() : base() base.totalTimeout = 0; base.maxRetries = 5; } + + /// + /// Creates a deep copy of this query policy. + /// + /// + public new QueryPolicy Clone() + { + return new QueryPolicy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/ScanPolicy.cs b/AerospikeClient/Policy/ScanPolicy.cs index 4684524b..7b912afb 100644 --- a/AerospikeClient/Policy/ScanPolicy.cs +++ b/AerospikeClient/Policy/ScanPolicy.cs @@ -21,6 +21,10 @@ namespace Aerospike.Client { /// /// Container object for optional parameters used in scan operations. 
+ /// + /// Inherited Policy fields and + /// are ignored. + /// /// public sealed class ScanPolicy : Policy { @@ -119,6 +123,15 @@ public ScanPolicy() : base() base.totalTimeout = 0; base.maxRetries = 5; } + + /// + /// Creates a deep copy of this scan policy. + /// + /// + public new ScanPolicy Clone() + { + return new ScanPolicy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/TlsPolicy.cs b/AerospikeClient/Policy/TlsPolicy.cs index d03e4831..1b64bece 100644 --- a/AerospikeClient/Policy/TlsPolicy.cs +++ b/AerospikeClient/Policy/TlsPolicy.cs @@ -151,5 +151,14 @@ private void ParseClientCertificateFile(string clientCertificateFile) clientCertificates = new X509CertificateCollection(); clientCertificates.Add(cert); } + + /// + /// Creates a deep copy of this TLS policy. + /// + /// + public TlsPolicy Clone() + { + return new TlsPolicy(this); + } } } diff --git a/AerospikeClient/Policy/TxnRollPolicy.cs b/AerospikeClient/Policy/TxnRollPolicy.cs new file mode 100644 index 00000000..a05a00fe --- /dev/null +++ b/AerospikeClient/Policy/TxnRollPolicy.cs @@ -0,0 +1,55 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) policy fields used to batch roll forward/backward records on + /// commit or abort. Used as a placeholder for now as there are no additional fields beyond BatchPolicy. + /// + public sealed class TxnRollPolicy : BatchPolicy + { + /// + /// Copy policy from another policy. + /// + public TxnRollPolicy(TxnRollPolicy other) : + base(other) + { + } + + /// + /// Default constructor. + /// + public TxnRollPolicy() + { + replica = Replica.MASTER; + maxRetries = 5; + socketTimeout = 3000; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } + + /// + /// Creates a deep copy of this txn roll policy. + /// + /// + public new TxnRollPolicy Clone() + { + return new TxnRollPolicy(this); + } + } +} diff --git a/AerospikeClient/Policy/TxnVerifyPolicy.cs b/AerospikeClient/Policy/TxnVerifyPolicy.cs new file mode 100644 index 00000000..495a793c --- /dev/null +++ b/AerospikeClient/Policy/TxnVerifyPolicy.cs @@ -0,0 +1,56 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) policy fields used to batch verify record versions on commit. + /// Used as a placeholder for now as there are no additional fields beyond BatchPolicy. 
+ /// + public sealed class TxnVerifyPolicy : BatchPolicy + { + /// + /// Copy policy from another policy. + /// + public TxnVerifyPolicy(TxnVerifyPolicy other) : + base(other) + { + } + + /// + /// Default constructor. + /// + public TxnVerifyPolicy() + { + readModeSC = ReadModeSC.LINEARIZE; + replica = Replica.MASTER; + maxRetries = 5; + socketTimeout = 3000; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } + + /// + /// Creates a deep copy of this txn verify policy. + /// + /// + public new TxnVerifyPolicy Clone() + { + return new TxnVerifyPolicy(this); + } + } +} diff --git a/AerospikeClient/Policy/WritePolicy.cs b/AerospikeClient/Policy/WritePolicy.cs index 3fd1247e..421f95cd 100644 --- a/AerospikeClient/Policy/WritePolicy.cs +++ b/AerospikeClient/Policy/WritePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -40,7 +40,7 @@ public sealed class WritePolicy : Policy public GenerationPolicy generationPolicy = GenerationPolicy.NONE; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// Default: CommitLevel.COMMIT_ALL @@ -93,7 +93,7 @@ public sealed class WritePolicy : Policy public bool respondAllOps; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition 3.10+ only. /// Default: false (do not tombstone deleted records). 
@@ -131,5 +131,14 @@ public WritePolicy() // Writes are not retried by default. base.maxRetries = 0; } + + /// + /// Creates a deep copy of this write policy. + /// + /// + public new WritePolicy Clone() + { + return new WritePolicy(this); + } } } diff --git a/AerospikeClient/Query/PartitionTracker.cs b/AerospikeClient/Query/PartitionTracker.cs index 4cc845fd..7895a66c 100644 --- a/AerospikeClient/Query/PartitionTracker.cs +++ b/AerospikeClient/Query/PartitionTracker.cs @@ -238,7 +238,7 @@ public List AssignPartitionsToNodes(Cluster cluster, string ns) Node node = p.GetNodeQuery(cluster, parts, part); // Use node name to check for single node equality because - // partition map may be in transitional state between + // partition map may be in transitional State between // the old and new node with the same name. if (nodeFilter != null && !nodeFilter.Name.Equals(node.Name)) { @@ -249,7 +249,7 @@ public List AssignPartitionsToNodes(Cluster cluster, string ns) if (np == null) { - // If the partition map is in a transitional state, multiple + // If the partition map is in a transitional State, multiple // NodePartitions instances (each with different partitions) // may be created for a single node. np = new NodePartitions(node, partitionsCapacity); @@ -390,7 +390,7 @@ public bool IsComplete(bool hasPartitionQuery, Policy policy, List= threads.Length) ? 
threads.Length : policy.maxConcurrentNodes; - cluster.AddTran(); + cluster.AddCommandCount(); } protected internal void InitializeThreads() diff --git a/AerospikeClient/Query/QueryListenerExecutor.cs b/AerospikeClient/Query/QueryListenerExecutor.cs index da72f62b..bdc6f6c4 100644 --- a/AerospikeClient/Query/QueryListenerExecutor.cs +++ b/AerospikeClient/Query/QueryListenerExecutor.cs @@ -29,7 +29,7 @@ public static void execute PartitionTracker tracker ) { - cluster.AddTran(); + cluster.AddCommandCount(); ulong taskId = statement.PrepareTaskId(); diff --git a/AerospikeClient/Query/QueryPartitionExecutor.cs b/AerospikeClient/Query/QueryPartitionExecutor.cs index 3c00b4ce..4bd5b6d8 100644 --- a/AerospikeClient/Query/QueryPartitionExecutor.cs +++ b/AerospikeClient/Query/QueryPartitionExecutor.cs @@ -51,7 +51,7 @@ PartitionTracker tracker this.cancel = new CancellationTokenSource(); this.tracker = tracker; this.recordSet = new RecordSet(this, policy.recordQueueSize, cancel.Token); - cluster.AddTran(); + cluster.AddCommandCount(); ThreadPool.UnsafeQueueUserWorkItem(this.Run, null); } diff --git a/AerospikeClient/Query/RecordSet.cs b/AerospikeClient/Query/RecordSet.cs index 626df355..03e69a78 100644 --- a/AerospikeClient/Query/RecordSet.cs +++ b/AerospikeClient/Query/RecordSet.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -198,7 +198,7 @@ internal void Abort() { valid = false; - // Send end command to transaction thread. + // Send end command to command thread. // It's critical that the end token add succeeds. while (!queue.TryAdd(END)) { diff --git a/AerospikeClient/Query/ResultSet.cs b/AerospikeClient/Query/ResultSet.cs index 9f5b7f1b..37ee4450 100644 --- a/AerospikeClient/Query/ResultSet.cs +++ b/AerospikeClient/Query/ResultSet.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. 
+ * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -189,7 +189,7 @@ internal void Abort() { valid = false; - // Send end command to transaction thread. + // Send end command to command thread. // It's critical that the end token add succeeds. while (!queue.TryAdd(END)) { diff --git a/AerospikeClient/Util/ConcurrentHashMap.cs b/AerospikeClient/Util/ConcurrentHashMap.cs new file mode 100644 index 00000000..a0adf5cd --- /dev/null +++ b/AerospikeClient/Util/ConcurrentHashMap.cs @@ -0,0 +1,166 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public class ConcurrentHashMap + { + private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion); + private readonly Dictionary _dictionary; + + public ConcurrentHashMap() + { + _dictionary = new Dictionary(); + } + + public ConcurrentHashMap(int capacity) + { + _dictionary = new Dictionary(capacity); + } + + public TValue this[TKey key] + { + get + { + try + { + _lock.EnterReadLock(); + return _dictionary[key]; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + set + { + try + { + _lock.EnterWriteLock(); + _dictionary[key] = value; + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + } + + public bool TryAdd(TKey key, TValue value) + { + _lock.EnterUpgradeableReadLock(); + try + { + if (!_dictionary.ContainsKey(key)) + { + _lock.EnterWriteLock(); + try + { + _dictionary.Add(key, value); + } + finally + { + _lock.ExitWriteLock(); + } + return true; + } + } + finally + { + _lock.ExitUpgradeableReadLock(); + } + return false; + } + + public void Clear() + { + try + { + _lock.EnterWriteLock(); + _dictionary.Clear(); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public bool ContainsKey(TKey key) + { + try + { + _lock.EnterReadLock(); + return _dictionary.ContainsKey(key); + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + + public bool Remove(TKey key) + { + try + { + _lock.EnterWriteLock(); + return _dictionary.Remove(key); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public int Count + { + get + { + try + { + _lock.EnterReadLock(); + return _dictionary.Count; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + } + + public bool PerformActionOnEachElement(Func initilaize, Action action) + { + _lock.EnterReadLock(); + try + { + if (initilaize is null || initilaize(_dictionary.Count())) + { + int cnt = 0; 
+ foreach (var element in _dictionary) + { + action(element.Key, element.Value, cnt++); + } + return cnt > 0; + } + } + finally + { + _lock.ExitReadLock(); + } + return false; + } + } +} diff --git a/AerospikeClient/Util/ConcurrentHashSet.cs b/AerospikeClient/Util/ConcurrentHashSet.cs new file mode 100644 index 00000000..92aae80c --- /dev/null +++ b/AerospikeClient/Util/ConcurrentHashSet.cs @@ -0,0 +1,138 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public class ConcurrentHashSet + { + private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion); + private readonly HashSet _hashSet; + + public ConcurrentHashSet() + { + _hashSet = new HashSet(); + } + + public ConcurrentHashSet(int capacity) + { + _hashSet = new HashSet(capacity); + } + + public bool Add(T item) + { + _lock.EnterUpgradeableReadLock(); + try + { + if (!_hashSet.Contains(item)) + { + _lock.EnterWriteLock(); + try + { + _hashSet.Add(item); + } + finally + { + _lock.ExitWriteLock(); + } + return true; + } + } + finally + { + _lock.ExitUpgradeableReadLock(); + } + return false; + } + + public void Clear() + { + try + { + _lock.EnterWriteLock(); + _hashSet.Clear(); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public bool Contains(T item) + { + try + { + _lock.EnterReadLock(); + return _hashSet.Contains(item); + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + + public bool Remove(T item) + { + try + { + _lock.EnterWriteLock(); + return _hashSet.Remove(item); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public int Count + { + get + { + try + { + _lock.EnterReadLock(); + return _hashSet.Count; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + } + + public bool PerformActionOnEachElement(Func initilaize, Action action) + { + _lock.EnterReadLock(); + try + { + if (initilaize is null || initilaize(_hashSet.Count())) + { + int cnt = 0; + foreach (var element in _hashSet) + { + action(element, cnt++); + } + return cnt > 0; + } + } + finally + { + _lock.ExitReadLock(); + } + return false; + } + } +} diff --git a/AerospikeDemo/AsyncTransaction.cs b/AerospikeDemo/AsyncTransaction.cs new file mode 100644 index 00000000..afecded2 --- /dev/null +++ b/AerospikeDemo/AsyncTransaction.cs @@ -0,0 +1,289 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Aerospike.Client; +using System.Threading; + +namespace Aerospike.Demo +{ + public class AsyncTransaction : AsyncExample + { + private bool completed; + + public AsyncTransaction(Console console) : base(console) + { + } + + /// + /// Multi-record transaction. + /// + public override void RunExample(AsyncClient client, Arguments args) + { + completed = false; + + Txn txn = new(); + + console.Info("Begin txn: " + txn.Id); + Put(client, txn, args); + + WaitTillComplete(); + } + + public void Put(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run put"); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + + Key key = new(args.ns, args.set, 1); + + client.Put(wp, new PutHandler(this, client, key, txn, args), key, new Bin("a", "val1")); + } + + class PutHandler : WriteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public PutHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = args; + } + + public void OnSuccess(Key key) + { + parent.PutAnother(client, txn, args); + } + + public void 
OnFailure(AerospikeException e) + { + parent.console.Error("Failed to write: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + }; + + public void PutAnother(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run another put"); + + var wp = client.WritePolicyDefault; + wp.Txn = txn; + + Key key = new(args.ns, args.set, 2); + + client.Put(wp, new PutAnotherHandler(this, client, key, txn, args), key, new Bin("b", "val2")); + } + + class PutAnotherHandler : WriteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public PutAnotherHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = args; + } + + public void OnSuccess(Key key) + { + parent.Get(client, txn, args); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to write: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + } + + public void Get(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run get"); + + var p = client.ReadPolicyDefault; + p.Txn = txn; + + Key key = new(args.ns, args.set, 3); + + client.Get(p, new GetHandler(this, client, key, txn, args), key); + } + + class GetHandler : RecordListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public GetHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = args; + } + + public void OnSuccess(Key key, Record 
record) + { + parent.Delete(client, txn, args); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to read: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + } + + public void Delete(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run delete"); + + var dp = client.WritePolicyDefault; + dp.Txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. + + Key key = new(args.ns, args.set, 3); + + client.Delete(dp, new DeleteHandler(this, client, key, txn), key); + } + + class DeleteHandler : DeleteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + + public DeleteHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + } + + public void OnSuccess(Key key, bool existed) + { + parent.Commit(client, txn); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to delete: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + + } + + public void Commit(AsyncClient client, Txn txn) + { + console.Info("Run commit"); + + client.Commit(new CommitHandler(this, txn), txn); + } + + class CommitHandler : CommitListener + { + private readonly AsyncTransaction parent; + private readonly Txn txn; + + public CommitHandler(AsyncTransaction parent, Txn txn) + { + this.parent = parent; + this.txn = txn; + } + + public void OnSuccess(CommitStatus.CommitStatusType status) + { + parent.console.Info("Txn committed: " + txn.Id); + parent.NotifyComplete(); + } + + public void OnFailure(AerospikeException.Commit ae) + { + parent.console.Error("Txn commit failed: " + txn.Id); + parent.NotifyComplete(); + } + } + + public void 
Abort(AsyncClient client, Txn txn) + { + console.Info("Run abort"); + + client.Abort(new AbortHandler(this, txn), txn); + } + + class AbortHandler : AbortListener + { + private readonly AsyncTransaction parent; + private readonly Txn txn; + + public AbortHandler(AsyncTransaction parent, Txn txn) + { + this.parent = parent; + this.txn = txn; + } + + public void OnSuccess(AbortStatus.AbortStatusType status) + { + parent.console.Error("Txn aborted: " + txn.Id); + parent.NotifyComplete(); + } + } + + private void WaitTillComplete() + { + lock (this) + { + while (!completed) + { + Monitor.Wait(this); + } + } + } + + private void NotifyComplete() + { + lock (this) + { + completed = true; + Monitor.Pulse(this); + } + } + } +} diff --git a/AerospikeDemo/AsyncTransactionWithTask.cs b/AerospikeDemo/AsyncTransactionWithTask.cs new file mode 100644 index 00000000..cbe020e1 --- /dev/null +++ b/AerospikeDemo/AsyncTransactionWithTask.cs @@ -0,0 +1,100 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Aerospike.Client; +using Neo.IronLua; +using System; +using System.Threading; +using System.Threading.Tasks; +using System.Windows.Forms; +using static System.Windows.Forms.VisualStyles.VisualStyleElement.TextBox; + +namespace Aerospike.Demo +{ + public class AsyncTransactionWithTask : AsyncExample + { + private bool completed; + + public AsyncTransactionWithTask(Console console) : base(console) + { + } + + /// + /// Multi-record transaction. + /// + public override void RunExample(AsyncClient client, Arguments args) + { + Txn txn = new(); + var token = CancellationToken.None; + + console.Info("Begin txn: " + txn.Id); + + try + { + Key key = null; + + var result = Task.Run(async () => + { + try + { + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + + console.Info("Run put with task"); + key = new(args.ns, args.set, 1); + await client.Put(wp, token, key, new Bin("a", "val1")); + + console.Info("Run another put"); + key = new(args.ns, args.set, 2); + await client.Put(wp, token, key, new Bin("b", "val2")); + + console.Info("Run get"); + var p = client.ReadPolicyDefault; + p.Txn = txn; + Key key3 = new(args.ns, args.set, 3); + Record rec = await client.Get(p, token, key3); + + console.Info("Run delete"); + var dp = client.WritePolicyDefault; + dp.Txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. + client.Delete(dp, key3); + + await client.Commit(txn, token); + return true; + } + catch (Exception e) + { + console.Error("Failed to write: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + // Abort and rollback MRT (multi-record transaction) if any errors occur. 
+ await client.Abort(txn, token); + return false; + } + }).Result; + + if (result) + console.Info("Txn committed: " + txn.Id); + else + console.Error("Txn aborted: " + txn.Id); + } + catch (Exception e) + { + console.Error($"Txn {txn.Id} Exception: {e}"); + } + } + } +} diff --git a/AerospikeDemo/DemoForm.cs b/AerospikeDemo/DemoForm.cs index 6b31f8dd..554ffbd3 100644 --- a/AerospikeDemo/DemoForm.cs +++ b/AerospikeDemo/DemoForm.cs @@ -73,6 +73,7 @@ private void FormInit() #endif new ExampleTreeNode("Expire", new Expire(console)), new ExampleTreeNode("Touch", new Touch(console)), + new ExampleTreeNode("Transaction", new Transaction(console)), new ExampleTreeNode("Operate", new Operate(console)), new ExampleTreeNode("OperateBit", new OperateBit(console)), new ExampleTreeNode("OperateList", new OperateList(console)), @@ -87,6 +88,8 @@ private void FormInit() new ExampleTreeNode("Async Batch", new AsyncBatch(console)), new ExampleTreeNode("Async Scan", new AsyncScan(console)), new ExampleTreeNode("Async Scan Page", new AsyncScanPage(console)), + new ExampleTreeNode("Async Transaction", new AsyncTransaction(console)), + new ExampleTreeNode("Async Transaction with Task", new AsyncTransactionWithTask(console)), new ExampleTreeNode("Async Query", new AsyncQuery(console)), new ExampleTreeNode("Async UDF", new AsyncUserDefinedFunction(console)), new ExampleTreeNode("List/Map", new ListMap(console)), diff --git a/AerospikeDemo/Transaction.cs b/AerospikeDemo/Transaction.cs new file mode 100644 index 00000000..7c04098c --- /dev/null +++ b/AerospikeDemo/Transaction.cs @@ -0,0 +1,79 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Aerospike.Client; +using System; + +namespace Aerospike.Demo +{ + public class Transaction : SyncExample + { + public Transaction(Console console) : base(console) + { + } + + /// + /// Multi-record transaction. + /// + public override void RunExample(IAerospikeClient client, Arguments args) + { + TxnReadWrite(client, args); + } + + private void TxnReadWrite(IAerospikeClient client, Arguments args) + { + Txn txn = new(); + console.Info("Begin txn: " + txn.Id); + + try + { + var wp = client.WritePolicyDefault; + wp.Txn = txn; + + console.Info("Run put"); + Key key1 = new(args.ns, args.set, 1); + client.Put(wp, key1, new Bin("a", "val1")); + + console.Info("Run another put"); + Key key2 = new(args.ns, args.set, 2); + client.Put(wp, key2, new Bin("b", "val2")); + + console.Info("Run get"); + var p = client.ReadPolicyDefault; + p.Txn = txn; + + Key key3 = new(args.ns, args.set, 3); + Record rec = client.Get(p, key3); + + console.Info("Run delete"); + var dp = client.WritePolicyDefault; + dp.Txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. + client.Delete(dp, key3); + } + catch (Exception) + { + // Abort and rollback MRT (multi-record transaction) if any errors occur. 
+ console.Info("Abort txn: " + txn.Id); + client.Abort(txn); + throw; + } + + console.Info("Commit txn: " + txn.Id); + client.Commit(txn); + } + } +} diff --git a/AerospikeTest/AerospikeTest.csproj b/AerospikeTest/AerospikeTest.csproj index 2a613d4e..c9494c62 100644 --- a/AerospikeTest/AerospikeTest.csproj +++ b/AerospikeTest/AerospikeTest.csproj @@ -1,4 +1,4 @@ - + Debug diff --git a/AerospikeTest/Args.cs b/AerospikeTest/Args.cs index 9b1dad6b..7413c816 100644 --- a/AerospikeTest/Args.cs +++ b/AerospikeTest/Args.cs @@ -1,225 +1,208 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using Aerospike.Client; -using Microsoft.Extensions.Configuration; -using Microsoft.VisualStudio.TestTools.UnitTesting; - -namespace Aerospike.Test -{ - [TestClass] - public class Args - { - public static Args Instance = new Args(); - - public IAerospikeClient client; - public AerospikeClient nativeClient; - public IAsyncClient asyncClient; - public AsyncClient nativeAsync; - public Host[] hosts; - public int port; - public string user; - public string password; - public int timeout; - public string clusterName; - public string ns; - public string set; - public string tlsName; - public TlsPolicy tlsPolicy; - public AuthMode authMode; - public bool singleBin; - public bool enterprise; - - public Args() - { - Log.Disable(); - - var builder = new ConfigurationBuilder().AddJsonFile("settings.json", optional: true, reloadOnChange: true); - IConfigurationRoot section = builder.Build(); - - port = int.Parse(section.GetSection("Port").Value); - clusterName = section.GetSection("ClusterName").Value; - user = section.GetSection("User").Value; - password = section.GetSection("Password").Value; - timeout = int.Parse(section.GetSection("Timeout").Value); - ns = section.GetSection("Namespace").Value; - set = section.GetSection("Set").Value; - authMode = (AuthMode)Enum.Parse(typeof(AuthMode), section.GetSection("AuthMode").Value, true); - - bool tlsEnable = bool.Parse(section.GetSection("TlsEnable").Value); - - if (tlsEnable) - { - tlsName = section.GetSection("TlsName").Value; - tlsPolicy = new TlsPolicy( - section.GetSection("TlsProtocols").Value, - section.GetSection("TlsRevoke").Value, - section.GetSection("TlsClientCertFile").Value, - bool.Parse(section.GetSection("TlsLoginOnly").Value) - ); - } - - var hostName = section.GetSection("Host").Value; - if (hostName == null || hostName == String.Empty) - { - hosts = null; - } - else - { - hosts = Host.ParseHosts(hostName, tlsName, port); - } - } - - public void Connect() - { - ConnectSync(); - ConnectAsync(); - } - - 
private void ConnectSync() - { - ClientPolicy policy = new ClientPolicy(); - policy.clusterName = clusterName; - policy.tlsPolicy = tlsPolicy; - policy.authMode = authMode; - policy.timeout = timeout; - - if (user != null && user.Length > 0) - { - policy.user = user; - policy.password = password; - } - - nativeClient = new AerospikeClient(policy, hosts); - - nativeClient.readPolicyDefault.totalTimeout = timeout; - nativeClient.WritePolicyDefault.totalTimeout = timeout; - nativeClient.ScanPolicyDefault.totalTimeout = timeout; - nativeClient.QueryPolicyDefault.totalTimeout = timeout; - nativeClient.BatchPolicyDefault.totalTimeout = timeout; - nativeClient.BatchParentPolicyWriteDefault.totalTimeout = timeout; - nativeClient.InfoPolicyDefault.timeout = timeout; - client = nativeClient; - - //Example of how to enable metrics - //client.EnableMetrics(new MetricsPolicy()); - - try - { - SetServerSpecific(); - } - catch - { - client.Close(); - client = null; - throw; - } - } - - private void ConnectAsync() - { - AsyncClientPolicy policy = new AsyncClientPolicy(); - policy.clusterName = clusterName; - policy.tlsPolicy = tlsPolicy; - policy.authMode = authMode; - policy.asyncMaxCommands = 300; - policy.timeout = timeout; - - if (user != null && user.Length > 0) - { - policy.user = user; - policy.password = password; - } - - nativeAsync = new AsyncClient(policy, hosts); - - nativeAsync.readPolicyDefault.totalTimeout = timeout; - nativeAsync.WritePolicyDefault.totalTimeout = timeout; - nativeAsync.ScanPolicyDefault.totalTimeout = timeout; - nativeAsync.QueryPolicyDefault.totalTimeout = timeout; - nativeAsync.BatchPolicyDefault.totalTimeout = timeout; - nativeAsync.BatchParentPolicyWriteDefault.totalTimeout = timeout; - nativeAsync.InfoPolicyDefault.timeout = timeout; - - asyncClient = nativeAsync; - - // Example of how to enable metrics - //asyncClient.EnableMetrics(new MetricsPolicy()); - } - - private void SetServerSpecific() - { - Node node = nativeClient.Nodes[0]; - string 
namespaceFilter = "namespace/" + ns; - Dictionary map = Info.Request(null, node, "edition", namespaceFilter); - - string edition = map["edition"]; - enterprise = edition.Equals("Aerospike Enterprise Edition"); - - string namespaceTokens = map[namespaceFilter]; - - if (namespaceTokens == null) - { - throw new Exception(string.Format("Failed to get namespace info: host={0} namespace={1}", node, ns)); - } - - singleBin = ParseBoolean(namespaceTokens, "single-bin"); - } - - private static bool ParseBoolean(String namespaceTokens, String name) - { - string search = name + '='; - int begin = namespaceTokens.IndexOf(search); - - if (begin < 0) - { - return false; - } - - begin += search.Length; - int end = namespaceTokens.IndexOf(';', begin); - - if (end < 0) - { - end = namespaceTokens.Length; - } - - string value = namespaceTokens.Substring(begin, end - begin); - return Convert.ToBoolean(value); - } - - public string GetBinName(string name) - { - // Single bin servers don't need a bin name. - return singleBin ? "" : name; - } - - public void Close() - { - if (client != null) - { - client.Close(); - client = null; - } - - if (asyncClient != null) - { - asyncClient.Close(); - asyncClient = null; - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Aerospike.Client; +using Microsoft.Extensions.Configuration; +using Microsoft.VisualStudio.TestTools.UnitTesting; + +namespace Aerospike.Test +{ + [TestClass] + public class Args + { + public static Args Instance = new Args(); + + public IAerospikeClient client; + public IAsyncClient asyncClient; + public Host[] hosts; + public int port; + public string user; + public string password; + public int timeout; + public string clusterName; + public string ns; + public string set; + public bool useServicesAlternate; + public string tlsName; + public TlsPolicy tlsPolicy; + public AuthMode authMode; + public bool singleBin; + public bool enterprise; + + public Args() + { + Log.Disable(); + + var builder = new ConfigurationBuilder().AddJsonFile("settings.json", optional: true, reloadOnChange: true); + IConfigurationRoot section = builder.Build(); + + port = int.Parse(section.GetSection("Port").Value); + clusterName = section.GetSection("ClusterName").Value; + user = section.GetSection("User").Value; + password = section.GetSection("Password").Value; + timeout = int.Parse(section.GetSection("Timeout").Value); + ns = section.GetSection("Namespace").Value; + set = section.GetSection("Set").Value; + authMode = (AuthMode)Enum.Parse(typeof(AuthMode), section.GetSection("AuthMode").Value, true); + useServicesAlternate = bool.Parse(section.GetSection("UseServicesAlternate").Value); + + bool tlsEnable = bool.Parse(section.GetSection("TlsEnable").Value); + + if (tlsEnable) + { + tlsName = section.GetSection("TlsName").Value; + tlsPolicy = new TlsPolicy( + section.GetSection("TlsProtocols").Value, + section.GetSection("TlsRevoke").Value, + section.GetSection("TlsClientCertFile").Value, + bool.Parse(section.GetSection("TlsLoginOnly").Value) + ); + } + + var hostName = section.GetSection("Host").Value; + if (hostName == null || hostName == String.Empty) + { + hosts = null; + } + else + { + hosts = Host.ParseHosts(hostName, tlsName, port); + } + } + + public void Connect() + 
{ + ConnectSync(); + ConnectAsync(); + } + + private void ConnectSync() + { + ClientPolicy policy = new ClientPolicy(); + policy.clusterName = clusterName; + policy.tlsPolicy = tlsPolicy; + policy.authMode = authMode; + policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + + if (user != null && user.Length > 0) + { + policy.user = user; + policy.password = password; + } + + client = new AerospikeClient(policy, hosts); + + //Example of how to enable metrics + //client.EnableMetrics(new MetricsPolicy()); + + try + { + SetServerSpecific(); + } + catch + { + client.Close(); + client = null; + throw; + } + } + + private void ConnectAsync() + { + AsyncClientPolicy policy = new AsyncClientPolicy(); + policy.clusterName = clusterName; + policy.tlsPolicy = tlsPolicy; + policy.authMode = authMode; + policy.asyncMaxCommands = 300; + policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + + if (user != null && user.Length > 0) + { + policy.user = user; + policy.password = password; + } + + asyncClient = new AsyncClient(policy, hosts); + + // Example of how to enable metrics + //asyncClient.EnableMetrics(new MetricsPolicy()); + } + + private void SetServerSpecific() + { + Node node = client.Nodes[0]; + string namespaceFilter = "namespace/" + ns; + Dictionary map = Info.Request(null, node, "edition", namespaceFilter); + + string edition = map["edition"]; + enterprise = edition.Equals("Aerospike Enterprise Edition"); + + string namespaceTokens = map[namespaceFilter]; + + if (namespaceTokens == null) + { + throw new Exception(string.Format("Failed to get namespace info: host={0} namespace={1}", node, ns)); + } + + singleBin = ParseBoolean(namespaceTokens, "single-bin"); + } + + private static bool ParseBoolean(String namespaceTokens, String name) + { + string search = name + '='; + int begin = namespaceTokens.IndexOf(search); + + if (begin < 0) + { + return false; + } + + begin += search.Length; + int end = 
namespaceTokens.IndexOf(';', begin); + + if (end < 0) + { + end = namespaceTokens.Length; + } + + string value = namespaceTokens.Substring(begin, end - begin); + return Convert.ToBoolean(value); + } + + public string GetBinName(string name) + { + // Single bin servers don't need a bin name. + return singleBin ? "" : name; + } + + public void Close() + { + if (client != null) + { + client.Close(); + client = null; + } + + if (asyncClient != null) + { + asyncClient.Close(); + asyncClient = null; + } + } + } +} diff --git a/AerospikeTest/Async/TestAsync.cs b/AerospikeTest/Async/TestAsync.cs index b6e4b6fe..7a638a32 100644 --- a/AerospikeTest/Async/TestAsync.cs +++ b/AerospikeTest/Async/TestAsync.cs @@ -16,6 +16,7 @@ */ using Aerospike.Client; using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; namespace Aerospike.Test { @@ -79,6 +80,30 @@ public bool AssertBinEqual(Key key, Record record, string binName, int expected) return true; } + public bool AssertBatchEqual(Key[] keys, Record[] recs, String binName, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + if (rec == null) + { + monitor.SetError(new Exception("recs[" + i + "] is null")); + return false; + } + + int received = rec.GetInt(binName); + + if (expected != received) + { + monitor.SetError(new Exception("Data mismatch: Expected " + expected + ". 
Received[" + i + "] " + received)); + return false; + } + } + return true; + } + public bool AssertRecordFound(Key key, Record record) { if (record == null) @@ -89,6 +114,16 @@ public bool AssertRecordFound(Key key, Record record) return true; } + public bool AssertRecordNotFound(Key key, Record record) + { + if (record != null) + { + monitor.SetError(new Exception("Record should not exist: namespace=" + args.ns + " set=" + args.set + " key=" + key.userKey)); + return false; + } + return true; + } + public bool AssertBetween(long begin, long end, long value) { if (!(value >= begin && value <= end)) diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs new file mode 100644 index 00000000..af397f37 --- /dev/null +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -0,0 +1,1155 @@ +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; +using static Aerospike.Client.CommitStatus; +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Test +{ + [TestClass] + public class TestAsyncTxn : TestAsync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = client.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + + [TestMethod] + public void AsyncTxnWrite() + { + Key key = new(args.ns, args.set, "asyncTxnWrite"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteTwice() + { + Key key = new(args.ns, args.set, "asyncTxnWriteTwice"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(txn, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteBlock() + { + Key key = new(args.ns, args.set, "asyncTxnWriteBlock"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Put(null, key, "val3", ResultCode.MRT_BLOCKED), // Should be blocked + new Commit(txn), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteRead() + { + Key key = new(args.ns, args.set, "asyncTxnWriteRead"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(null, key, "val1"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteAbort() + { + 
Key key = new(args.ns, args.set, "asyncTxnWriteAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(txn, key, "val2"), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDelete() + { + Key key = new(args.ns, args.set, "asyncTxnDelete"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDeleteAbort() + { + Key key = new(args.ns, args.set, "asyncTxnDeleteAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDeleteTwice() + { + Key key = new(args.ns, args.set, "asyncTxnDeleteTwice"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnTouch() + { + Key key = new(args.ns, args.set, "asyncTxnTouch"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Touch(txn, key), + new Commit(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnTouchAbort() + { + Key key = new(args.ns, args.set, "asyncTxnTouchAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Touch(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnOperateWrite() + { + Key key = new(args.ns, args.set, "asyncTxnOperateWrite3"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new 
Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.Put(new Bin(binName, "val2")), + Operation.Get(bin2.name) + ), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnOperateWriteAbort() + { + Key key = new(args.ns, args.set, "asyncTxnOperateWriteAbort"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.Put(new Bin(binName, "val2")), + Operation.Get(bin2.name) + ), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnUDF() + { + Key key = new(args.ns, args.set, "asyncTxnUDF"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnUDFAbort() + { + Key key = new(args.ns, args.set, "asyncTxnUDFAbort"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnBatch() + { + Key[] keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Txn txn = new(); + bin = new(binName, 2); + + var cmds = new Runner[] + { + new BatchGetExpect(null, keys, 1), + new BatchOperate(txn, keys, Operation.Put(bin)), + new Commit(txn), + new 
BatchGetExpect(null, keys, 2), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnBatchAbort() + { + var keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Txn txn = new(); + bin = new Bin(binName, 2); + + var cmds = new Runner[] + { + new BatchGetExpect(null, keys, 1), + new BatchOperate(txn, keys, Operation.Put(bin)), + new Abort(txn), + new BatchGetExpect(null, keys, 1), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteCommitAbort() + { + Key key = new(args.ns, args.set, "asyncTxnCommitAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2"), + new Abort(txn, AbortStatus.AbortStatusType.ALREADY_COMMITTED) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteReadTwoTxn() + { + Key key = new(args.ns, args.set, "asyncTxnWriteReadTwoTxn"); + Txn txn1 = new(); + Txn txn2 = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new GetExpect(txn1, key, "val1"), + new GetExpect(txn2, key, "val1"), + new Commit(txn1), + new Commit(txn2), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnLUTCommit() // Test Case 38 + { + Key key1 = new(args.ns, args.set, "asyncTxnLUTCommit1"); + Key key2 = new(args.ns, args.set, "asyncTxnLUTCommit2"); + Key key3 = new(args.ns, args.set, "asyncTxnLUTCommit3"); + Txn txn = new(); // T1 + + var cmds = new Runner[] + { + new Delete(null, key1), // Prep + new Delete(null, key2), + new Delete(null, key3), + new Put(txn, key1, "val1"), // T1 + new GetExpect(txn, key1, "val1", 1), // T2 + new Put(txn, key1, "val11"), // T3 + new GetExpect(txn, key1, "val11", 2), // T4 + new Put(null, key2, "val1"), // T5 + new GetExpect(txn, key2, "val1", 1), // T6 + new Put(txn, key2, "val11"), // T7 + 
new GetExpect(txn, key2, "val11", 2), // T8 + new Put(txn, key3, "val1"), // T9 + new GetExpect(txn, key3, "val1", 1), // T10 + new Commit(txn), // T11 + new GetExpect(null, key1, "val11", 3), // T12 + new GetExpect(null, key2, "val11", 3), + new GetExpect(null, key3, "val1", 2) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteAfterCommit() + { + Key key = new(args.ns, args.set, "asyncTxnWriteAfter"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(txn, key, "val1"), + new Commit(txn), + new Sleep(1000), + new Put(txn, key, "val1", ResultCode.MRT_EXPIRED), + }; + + Execute(cmds); + } + + private void Execute(Runner[] cmdArray) + { + Cmds a = new(this, cmdArray); + a.RunNext(); + WaitTillComplete(); + } + + private void OnError(Exception e) + { + SetError(e); + NotifyCompleted(); + } + + private void OnError(Exception e, int expectedResult) + { + if (e is AerospikeException ae) + { + if (ae.Result == expectedResult) + { + NotifyCompleted(); + return; + } + } + + OnError(e); + } + + private void OnError() + { + // Error is located in monitor instance which is checked in waitTillComplete(); + NotifyCompleted(); + } + + private class Cmds : Listener + { + private readonly TestAsyncTxn parent; + readonly Runner[] cmds; + int idx; + + public Cmds(TestAsyncTxn parent, Runner[] cmds) + { + this.parent = parent; + this.cmds = cmds; + this.idx = -1; + } + + public void RunNext() + { + if (++idx == cmds.Length) + { + parent.NotifyCompleted(); + return; + } + + try + { + cmds[idx].Run(parent, this); + } + catch (Exception e) + { + parent.OnError(e); + } + } + + public void OnSuccess() + { + RunNext(); + } + + public void OnFailure() + { + parent.OnError(); + } + + public void OnFailure(Exception e) + { + parent.OnError(e); + } + + public void OnFailure(Exception e, int expectedResult) + { + parent.OnError(e, expectedResult); + } + } + + public class Commit : Runner + { + private readonly Txn txn; + private readonly bool 
throwsCommitException; + + public Commit(Txn txn) + { + this.txn = txn; + this.throwsCommitException = false; + } + + public Commit(Txn txn, bool throwsCommitException) + { + this.txn = txn; + this.throwsCommitException = throwsCommitException; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + client.Commit(new CommitHandler(listener, throwsCommitException), txn); + } + + private class CommitHandler : CommitListener + { + private readonly Listener listener; + private readonly bool throwsCommitException; + + public CommitHandler(Listener listener, bool throwsCommitException) + { + this.listener = listener; + this.throwsCommitException = throwsCommitException; + } + + public void OnSuccess(CommitStatusType status) + { + if (status == CommitStatusType.OK) + { + listener.OnSuccess(); + return; + } + listener.OnFailure(); + } + + public void OnFailure(AerospikeException.Commit e) + { + if (throwsCommitException) + { + listener.OnSuccess(); + return; + } + + listener.OnFailure(e); + } + } + } + + + + public class Abort : Runner + { + private readonly Txn txn; + private readonly AbortStatusType status; + + public Abort(Txn txn) + { + this.txn = txn; + this.status = AbortStatusType.OK; + } + + public Abort(Txn txn, AbortStatusType abortStatus) + { + this.txn = txn; + this.status = abortStatus; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + client.Abort(new AbortHandler(listener, status), txn); + } + + private class AbortHandler : AbortListener + { + private readonly Listener listener; + private readonly AbortStatusType status; + + public AbortHandler(Listener listener, AbortStatusType status) + { + this.listener = listener; + this.status = status; + } + + public void OnSuccess(AbortStatusType status) + { + if (status == this.status) + { + listener.OnSuccess(); + return; + } + listener.OnFailure(); + } + } + } + + public class Put : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly Bin[] bins; + 
private readonly int expectedResult = 0; + + public Put(Txn txn, Key key, string val) + { + this.txn = txn; + this.key = key; + this.bins = new Bin[] { new(binName, val)}; + } + + public Put(Txn txn, Key key, string val, int expectedResult) + { + this.txn = txn; + this.key = key; + this.bins = new Bin[] { new(binName, val) }; + this.expectedResult = expectedResult; + } + + public Put(Txn txn, Key key, params Bin[] bins) + { + this.txn = txn; + this.key = key; + this.bins = bins; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + if (txn != null) + { + wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + } + client.Put(wp, new PutHandler(listener, expectedResult), key, bins); + } + + private class PutHandler : WriteListener + { + private readonly Listener listener; + private readonly int expectedResult; + + public PutHandler(Listener listener, int expectedResult) + { + this.listener = listener; + this.expectedResult = expectedResult; + } + + public void OnSuccess(Key key) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + if (expectedResult != 0) + { + listener.OnFailure(e, expectedResult); + } + else + { + listener.OnFailure(e); + } + } + } + } + + public class GetExpect : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly string expect; + private readonly int generation; + + public GetExpect(Txn txn, Key key, string expect) + { + this.txn = txn; + this.key = key; + this.expect = expect; + generation = 0; // Do not check generation + } + + public GetExpect(Txn txn, Key key, string expect, int generation) + { + this.txn = txn; + this.key = key; + this.expect = expect; + this.generation = generation; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + Policy p = null; + + if (txn != null) + { + p = client.ReadPolicyDefault.Clone(); + p.Txn = txn; + } + client.Get(p, new GetExpectHandler(parent, listener, expect, generation), key); + 
} + + private class GetExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + private readonly Listener listener; + private string expect; + private int generation; + + public GetExpectHandler(TestAsyncTxn parent, Listener listener, string expect, int generation) + { + this.parent = parent; + this.listener = listener; + this.expect = expect; + this.generation = generation; + } + + public void OnSuccess(Key key, Record record) + { + if (generation != 0) + { + if (generation != record.generation) + { + listener.OnFailure(new AssertFailedException("Expected generation: " + generation + " but got: " + record.generation)); + } + } + + if (expect != null) + { + if (parent.AssertBinEqual(key, record, binName, expect)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class OperateExpect : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly Operation[] ops; + private readonly Bin? expect; + + public OperateExpect(Txn txn, Key key, Bin? expect, params Operation[] ops) + { + this.txn = txn; + this.key = key; + this.expect = expect; + this.ops = ops; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + } + client.Operate(wp, new OperateExpectHandler(parent, listener, expect), key, ops); + } + + private class OperateExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + + private readonly Listener listener; + private Bin? expect; + + public OperateExpectHandler(TestAsyncTxn parent, Listener listener, Bin? 
expect) + { + this.parent = parent; + this.listener = listener; + this.expect = expect; + } + + public void OnSuccess(Key key, Record record) + { + if (expect != null) + { + if (parent.AssertBinEqual(key, record, expect?.name, expect?.value.Object)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class UDF : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly string packageName; + private readonly string functionName; + private readonly Value[] functionArgs; + + public UDF( + Txn txn, + Key key, + string packageName, + string functionName, + params Value[] functionArgs + ) { + this.txn = txn; + this.key = key; + this.packageName = packageName; + this.functionName = functionName; + this.functionArgs = functionArgs; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + } + client.Execute(wp, new UDFHandler(listener), key, packageName, functionName, functionArgs); + } + + private class UDFHandler : ExecuteListener + { + private readonly Listener listener; + + public UDFHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key, Object obj) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class BatchGetExpect : Runner + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly int expected; + + public BatchGetExpect(Txn txn, Key[] keys, int expected) + { + this.txn = txn; + this.keys = keys; + this.expected = expected; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + BatchPolicy bp = null; + + 
if (txn != null) { + bp = client.BatchPolicyDefault.Clone(); + bp.Txn = txn; + } + client.Get(bp, new BatchGetExpectHandler(parent, listener, expected), keys); + } + + private class BatchGetExpectHandler : RecordArrayListener + { + private readonly TestAsyncTxn parent; + private readonly Listener listener; + private readonly int expected; + + public BatchGetExpectHandler(TestAsyncTxn parent, Listener listener, int expected) + { + this.parent = parent; + this.listener = listener; + this.expected = expected; + } + + public void OnSuccess(Key[] keys, Record[] records) + { + if (parent.AssertBatchEqual(keys, records, binName, expected)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class BatchOperate : Runner + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly Operation[] ops; + + public BatchOperate(Txn txn, Key[] keys, params Operation[] ops) + { + this.txn = txn; + this.keys = keys; + this.ops = ops; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + BatchPolicy bp = null; + + if (txn != null) + { + bp = client.BatchParentPolicyWriteDefault.Clone(); + bp.Txn = txn; + } + client.Operate(bp, null, new BatchOperateHandler(listener), keys, ops); + } + + private class BatchOperateHandler : BatchRecordArrayListener + { + private Listener listener; + + public BatchOperateHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + if (status) + { + listener.OnSuccess(); + } + else + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + 
listener.OnFailure(new AerospikeException(sb.ToString())); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class Touch : Runner + { + private readonly Txn txn; + private readonly Key key; + + public Touch(Txn txn, Key key) + { + this.txn = txn; + this.key = key; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) { + wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + } + client.Touch(wp, new TouchHandler(listener), key); + } + + private class TouchHandler : WriteListener + { + private Listener listener; + + public TouchHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class Delete : Runner + { + private readonly Txn txn; + private readonly Key key; + + public Delete(Txn txn, Key key) + { + this.txn = txn; + this.key = key; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + wp.durableDelete = true; + } + client.Delete(wp, new DeleteHandler(listener), key); + } + + private class DeleteHandler : DeleteListener + { + private Listener listener; + + public DeleteHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key, bool existed) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class Sleep : Runner + { + private readonly int sleepMillis; + + public Sleep(int sleepMillis) + { + this.sleepMillis = sleepMillis; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + Util.Sleep(sleepMillis); + parent.NotifyCompleted(); + } + } + + public interface Runner + { + void Run(TestAsyncTxn 
parent, Listener listener); + } + + public interface Listener + { + void OnSuccess(); + void OnFailure(); + void OnFailure(Exception e); + + void OnFailure(Exception e, int expectedResult); + } + } +} diff --git a/AerospikeTest/Sync/Basic/TestBatch.cs b/AerospikeTest/Sync/Basic/TestBatch.cs index b3927f49..8540f119 100644 --- a/AerospikeTest/Sync/Basic/TestBatch.cs +++ b/AerospikeTest/Sync/Basic/TestBatch.cs @@ -1,491 +1,491 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using Aerospike.Client; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Neo.IronLua; -using System; -using System.Collections; -using System.Collections.Generic; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.Intrinsics.X86; -using System.Security.Policy; -using System.Text; - -namespace Aerospike.Test -{ - [TestClass] - public class TestBatch : TestSync - { - private const string BinName = "bbin"; - private const string BinName2 = "bbin2"; - private const string BinName3 = "bbin3"; - private const string ListBin = "lbin"; - private const string ListBin2 = "lbin2"; - private const string KeyPrefix = "tbatkey"; - private const string ValuePrefix = "batchvalue"; - private const int Size = 8; - - [ClassInitialize()] - public static void WriteRecords(TestContext testContext) - { - WritePolicy policy = new WritePolicy(); - policy.expiration = 2592000; - - for (int i = 1; i <= Size; i++) - { - Key key = new Key(args.ns, args.set, KeyPrefix + i); - Bin bin = new Bin(BinName, ValuePrefix + i); - - List list = new List(); - - for (int j = 0; j < i; j++) - { - list.Add(j * i); - } - - List list2 = new List(); - - for (int j = 0; j < 2; j++) - { - list2.Add(j); - } - - Bin listBin = new Bin(ListBin, list); - Bin listBin2 = new Bin(ListBin2, list2); - - if (i != 6) - { - client.Put(policy, key, bin, listBin, listBin2); - } - else - { - client.Put(policy, key, new Bin(BinName, i), listBin, listBin2); - } - } - - // Add records that will eventually be deleted. 
- client.Put(policy, new Key(args.ns, args.set, 10000), new Bin(BinName, 10000)); - client.Put(policy, new Key(args.ns, args.set, 10001), new Bin(BinName, 10001)); - client.Put(policy, new Key(args.ns, args.set, 10002), new Bin(BinName, 10002)); - } - - [TestMethod] - public void BatchExists() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - bool[] existsArray = client.Exists(null, keys); - Assert.AreEqual(Size, existsArray.Length); - - for (int i = 0; i < existsArray.Length; i++) - { - if (!existsArray[i]) - { - Assert.Fail("Some batch records not found."); - } - } - } - - [TestMethod] - public void BatchReads() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.Get(null, keys, BinName); - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Key key = keys[i]; - Record record = records[i]; - - if (i != 5) - { - AssertBinEqual(key, record, BinName, ValuePrefix + (i + 1)); - } - else - { - AssertBinEqual(key, record, BinName, i + 1); - } - } - } - - [TestMethod] - public void BatchReadHeaders() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.GetHeader(null, keys); - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Key key = keys[i]; - Record record = records[i]; - - AssertRecordFound(key, record); - Assert.AreNotEqual(0, record.generation); - // ttl can be zero if server default-ttl = 0. - //Assert.AreNotEqual(0, record.expiration); - } - } - - [TestMethod] - public void BatchReadComplex() - { - // Batch allows multiple namespaces in one call, but example test environment may only have one namespace. 
- - // bin * 8 - Expression exp = Exp.Build(Exp.Mul(Exp.IntBin(BinName), Exp.Val(8))); - Operation[] ops = Operation.Array(ExpOperation.Read(BinName, exp, ExpReadFlags.DEFAULT)); - - string[] bins = new string[] { BinName }; - List records = new List(); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), bins)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 2), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 3), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 4), false)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 5), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), ops)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 7), bins)); - - // This record should be found, but the requested bin will not be found. - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 8), new string[] { "binnotfound" })); - - // This record should not be found. - records.Add(new BatchRead(new Key(args.ns, args.set, "keynotfound"), bins)); - - // Execute batch. 
- client.Get(null, records); - - AssertBatchBinEqual(records, BinName, 0); - AssertBatchBinEqual(records, BinName, 1); - AssertBatchBinEqual(records, BinName, 2); - AssertBatchRecordExists(records, BinName, 3); - AssertBatchBinEqual(records, BinName, 4); - - BatchRead batch = records[5]; - AssertRecordFound(batch.key, batch.record); - int v = batch.record.GetInt(BinName); - Assert.AreEqual(48, v); - - AssertBatchBinEqual(records, BinName, 6); - - batch = records[7]; - AssertRecordFound(batch.key, batch.record); - object val = batch.record.GetValue("binnotfound"); - if (val != null) - { - Assert.Fail("Unexpected batch bin value received"); - } - - batch = records[8]; - if (batch.record != null) - { - Assert.Fail("Unexpected batch record received"); - } - } - - [TestMethod] - public void BatchListReadOperate() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.Get(null, keys, - ListOperation.Size(ListBin), - ListOperation.GetByIndex(ListBin, -1, ListReturnType.VALUE)); - - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Record record = records[i]; - IList results = record.GetList(ListBin); - long size = (long)results[0]; - long val = (long)results[1]; - - Assert.AreEqual(i + 1, size); - Assert.AreEqual(i * (i + 1), val); - } - } - - [TestMethod] - public void BatchListWriteOperate() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - // Add integer to list and get size and last element of list bin for all records. 
- BatchResults bresults = client.Operate(null, null, keys, - ListOperation.Insert(ListBin2, 0, Value.Get(1000)), - ListOperation.Size(ListBin2), - ListOperation.GetByIndex(ListBin2, -1, ListReturnType.VALUE) - ); - - for (int i = 0; i < bresults.records.Length; i++) - { - BatchRecord br = bresults.records[i]; - Assert.AreEqual(0, br.resultCode); - - IList results = br.record.GetList(ListBin2); - long size = (long)results[1]; - long val = (long)results[2]; - - Assert.AreEqual(3, size); - Assert.AreEqual(1, val); - } - } - - [TestMethod] - public void BatchOperateSendKey() - { - Key[] keys = new Key[3]; - for (int i = 0; i < 3; i++) - { - keys[i] = new Key(args.ns, args.set, "sendkey" + i); - } - - BatchWritePolicy batchWritePolicy = new() - { - sendKey = true - }; - - Operation[] ops = { - Operation.Put(new Bin("now", DateTime.Now.ToFileTime())) - }; - - client.Operate(null, batchWritePolicy, keys, ops); - - Key myKey = new(args.ns, args.set, "sendkey2"); - WritePolicy wp = new() - { - sendKey = true - }; - - client.Put(wp, myKey, new Bin("name", "Andrew")); - } - - [TestMethod] - public void BatchReadAllBins() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Bin bin = new Bin("bin5", "NewValue"); - - BatchResults bresults = client.Operate(null, null, keys, - Operation.Put(bin), - Operation.Get() - ); - - for (int i = 0; i < bresults.records.Length; i++) - { - BatchRecord br = bresults.records[i]; - Assert.AreEqual(0, br.resultCode); - - Record r = br.record; - - string s = r.GetString(bin.name); - Assert.AreEqual("NewValue", s); - - object obj = r.GetValue(BinName); - Assert.IsNotNull(obj); - } - } - - [TestMethod] - public void BatchWriteComplex() - { - Expression wexp1 = Exp.Build(Exp.Add(Exp.IntBin(BinName), Exp.Val(1000))); - - Operation[] wops1 = Operation.Array(Operation.Put(new Bin(BinName2, 100))); - Operation[] wops2 = Operation.Array(ExpOperation.Write(BinName3, 
wexp1, ExpWriteFlags.DEFAULT)); - Operation[] rops1 = Operation.Array(Operation.Get(BinName2)); - Operation[] rops2 = Operation.Array(Operation.Get(BinName3)); - - BatchWritePolicy wp = new BatchWritePolicy(); - wp.sendKey = true; - - BatchWrite bw1 = new BatchWrite(new Key(args.ns, args.set, KeyPrefix + 1), wops1); - BatchWrite bw2 = new BatchWrite(new Key("invalid", args.set, KeyPrefix + 1), wops1); - BatchWrite bw3 = new BatchWrite(wp, new Key(args.ns, args.set, KeyPrefix + 6), wops2); - BatchDelete bd1 = new BatchDelete(new Key(args.ns, args.set, 10002)); - - List records = new List(); - records.Add(bw1); - records.Add(bw2); - records.Add(bw3); - records.Add(bd1); - - bool status = client.Operate(null, records); - - Assert.IsFalse(status); // "invalid" namespace triggers the false status. - Assert.AreEqual(0, bw1.resultCode); - AssertBinEqual(bw1.key, bw1.record, BinName2, 0); - Assert.AreEqual(ResultCode.INVALID_NAMESPACE, bw2.resultCode); - Assert.AreEqual(0, bw3.resultCode); - AssertBinEqual(bw3.key, bw3.record, BinName3, 0); - Assert.AreEqual(ResultCode.OK, bd1.resultCode); - - BatchRead br1 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), rops1); - BatchRead br2 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), rops2); - BatchRead br3 = new BatchRead(new Key(args.ns, args.set, 10002), true); - - records.Clear(); - records.Add(br1); - records.Add(br2); - records.Add(br3); - - status = client.Operate(null, records); - - Assert.IsFalse(status); // Read of deleted record causes status to be false. 
- AssertBinEqual(br1.key, br1.record, BinName2, 100); - AssertBinEqual(br2.key, br2.record, BinName3, 1006); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br3.resultCode); - } - - [TestMethod] - public void BatchDelete() - { - // Define keys - Key[] keys = new Key[] { new Key(args.ns, args.set, 10000), new Key(args.ns, args.set, 10001) }; - - // Ensure keys exists - bool[] exists = client.Exists(null, keys); - Assert.IsTrue(exists[0]); - Assert.IsTrue(exists[1]); - - // Delete keys - BatchResults br = client.Delete(null, null, keys); - Assert.IsTrue(br.status); - - // Ensure keys do not exist - exists = client.Exists(null, keys); - Assert.IsFalse(exists[0]); - Assert.IsFalse(exists[1]); - } - - [TestMethod] - public void BatchReadTTL() - { - // WARNING: This test takes a long time to run due to sleeps. - // Define keys - Key key1 = new(args.ns, args.set, 88888); - Key key2 = new(args.ns, args.set, 88889); - - // Write keys with ttl. - BatchWritePolicy bwp = new() - { - expiration = 10 - }; - Key[] keys = new Key[] { key1, key2 }; - client.Operate(null, bwp, keys, Operation.Put(new Bin("a", 1))); - - // Read records before they expire and reset read ttl on one record. - Util.Sleep(8000); - BatchReadPolicy brp1 = new() - { - readTouchTtlPercent = 80 - }; - - BatchReadPolicy brp2 = new() - { - readTouchTtlPercent = -1 - }; - - BatchRead br1 = new(brp1, key1, new String[] { "a" }); - BatchRead br2 = new(brp2, key2, new String[] { "a" }); - - List list = new() - { - br1, - br2 - }; - - bool rv = client.Operate(null, list); - - Assert.IsTrue(rv); - Assert.AreEqual(ResultCode.OK, br1.resultCode); - Assert.AreEqual(ResultCode.OK, br2.resultCode); - - // Read records again, but don't reset read ttl. 
- Util.Sleep(3000); - brp1.readTouchTtlPercent = -1; - brp2.readTouchTtlPercent = -1; - - br1 = new BatchRead(brp1, key1, new String[] { "a" }); - br2 = new BatchRead(brp2, key2, new String[] { "a" }); - - list.Clear(); - list.Add(br1); - list.Add(br2); - - rv = client.Operate(null, list); - - // Key 2 should have expired. - Assert.AreEqual(ResultCode.OK, br1.resultCode); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); - Assert.IsFalse(rv); - - // Read record after it expires, showing it's gone. - Util.Sleep(8000); - rv = client.Operate(null, list); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br1.resultCode); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); - Assert.IsFalse(rv); - } - - - private void AssertBatchBinEqual(List list, string binName, int i) - { - BatchRead batch = list[i]; - AssertBinEqual(batch.key, batch.record, binName, ValuePrefix + (i + 1)); - } - - private void AssertBatchRecordExists(List list, string binName, int i) - { - BatchRead batch = list[i]; - AssertRecordFound(batch.key, batch.record); - Assert.AreNotEqual(0, batch.record.generation); - // ttl can be zero if server default-ttl = 0. - // Assert.AreNotEqual(0, batch.record.expiration); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Aerospike.Client; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Neo.IronLua; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics.X86; +using System.Security.Policy; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestBatch : TestSync + { + private const string BinName = "bbin"; + private const string BinName2 = "bbin2"; + private const string BinName3 = "bbin3"; + private const string ListBin = "lbin"; + private const string ListBin2 = "lbin2"; + private const string KeyPrefix = "tbatkey"; + private const string ValuePrefix = "batchvalue"; + private const int Size = 8; + + [ClassInitialize()] + public static void WriteRecords(TestContext testContext) + { + WritePolicy policy = new WritePolicy(); + policy.expiration = 2592000; + + for (int i = 1; i <= Size; i++) + { + Key key = new Key(args.ns, args.set, KeyPrefix + i); + Bin bin = new Bin(BinName, ValuePrefix + i); + + List list = new List(); + + for (int j = 0; j < i; j++) + { + list.Add(j * i); + } + + List list2 = new List(); + + for (int j = 0; j < 2; j++) + { + list2.Add(j); + } + + Bin listBin = new Bin(ListBin, list); + Bin listBin2 = new Bin(ListBin2, list2); + + if (i != 6) + { + client.Put(policy, key, bin, listBin, listBin2); + } + else + { + client.Put(policy, key, new Bin(BinName, i), listBin, listBin2); + } + } + + // Add records that will eventually be deleted. 
+ client.Put(policy, new Key(args.ns, args.set, 10000), new Bin(BinName, 10000)); + client.Put(policy, new Key(args.ns, args.set, 10001), new Bin(BinName, 10001)); + client.Put(policy, new Key(args.ns, args.set, 10002), new Bin(BinName, 10002)); + } + + [TestMethod] + public void BatchExists() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + bool[] existsArray = client.Exists(null, keys); + Assert.AreEqual(Size, existsArray.Length); + + for (int i = 0; i < existsArray.Length; i++) + { + if (!existsArray[i]) + { + Assert.Fail("Some batch records not found."); + } + } + } + + [TestMethod] + public void BatchReads() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.Get(null, keys, BinName); + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Key key = keys[i]; + Record record = records[i]; + + if (i != 5) + { + AssertBinEqual(key, record, BinName, ValuePrefix + (i + 1)); + } + else + { + AssertBinEqual(key, record, BinName, i + 1); + } + } + } + + [TestMethod] + public void BatchReadHeaders() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.GetHeader(null, keys); + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Key key = keys[i]; + Record record = records[i]; + + AssertRecordFound(key, record); + Assert.AreNotEqual(0, record.generation); + // ttl can be zero if server default-ttl = 0. + //Assert.AreNotEqual(0, record.expiration); + } + } + + [TestMethod] + public void BatchReadComplex() + { + // Batch allows multiple namespaces in one call, but example test environment may only have one namespace. 
+ + // bin * 8 + Expression exp = Exp.Build(Exp.Mul(Exp.IntBin(BinName), Exp.Val(8))); + Operation[] ops = Operation.Array(ExpOperation.Read(BinName, exp, ExpReadFlags.DEFAULT)); + + string[] bins = new string[] { BinName }; + List records = new List(); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), bins)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 2), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 3), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 4), false)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 5), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), ops)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 7), bins)); + + // This record should be found, but the requested bin will not be found. + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 8), new string[] { "binnotfound" })); + + // This record should not be found. + records.Add(new BatchRead(new Key(args.ns, args.set, "keynotfound"), bins)); + + // Execute batch. 
+ client.Get(null, records); + + AssertBatchBinEqual(records, BinName, 0); + AssertBatchBinEqual(records, BinName, 1); + AssertBatchBinEqual(records, BinName, 2); + AssertBatchRecordExists(records, BinName, 3); + AssertBatchBinEqual(records, BinName, 4); + + BatchRead batch = records[5]; + AssertRecordFound(batch.key, batch.record); + int v = batch.record.GetInt(BinName); + Assert.AreEqual(48, v); + + AssertBatchBinEqual(records, BinName, 6); + + batch = records[7]; + AssertRecordFound(batch.key, batch.record); + object val = batch.record.GetValue("binnotfound"); + if (val != null) + { + Assert.Fail("Unexpected batch bin value received"); + } + + batch = records[8]; + if (batch.record != null) + { + Assert.Fail("Unexpected batch record received"); + } + } + + [TestMethod] + public void BatchListReadOperate() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.Get(null, keys, + ListOperation.Size(ListBin), + ListOperation.GetByIndex(ListBin, -1, ListReturnType.VALUE)); + + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Record record = records[i]; + IList results = record.GetList(ListBin); + long size = (long)results[0]; + long val = (long)results[1]; + + Assert.AreEqual(i + 1, size); + Assert.AreEqual(i * (i + 1), val); + } + } + + [TestMethod] + public void BatchListWriteOperate() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + // Add integer to list and get size and last element of list bin for all records. 
+ BatchResults bresults = client.Operate(null, null, keys, + ListOperation.Insert(ListBin2, 0, Value.Get(1000)), + ListOperation.Size(ListBin2), + ListOperation.GetByIndex(ListBin2, -1, ListReturnType.VALUE) + ); + + for (int i = 0; i < bresults.records.Length; i++) + { + BatchRecord br = bresults.records[i]; + Assert.AreEqual(0, br.resultCode); + + IList results = br.record.GetList(ListBin2); + long size = (long)results[1]; + long val = (long)results[2]; + + Assert.AreEqual(3, size); + Assert.AreEqual(1, val); + } + } + + [TestMethod] + public void BatchOperateSendKey() + { + Key[] keys = new Key[3]; + for (int i = 0; i < 3; i++) + { + keys[i] = new Key(args.ns, args.set, "sendkey" + i); + } + + BatchWritePolicy batchWritePolicy = new() + { + sendKey = true + }; + + Operation[] ops = { + Operation.Put(new Bin("now", DateTime.Now.ToFileTime())) + }; + + client.Operate(null, batchWritePolicy, keys, ops); + + Key myKey = new(args.ns, args.set, "sendkey2"); + WritePolicy wp = new() + { + sendKey = true + }; + + client.Put(wp, myKey, new Bin("name", "Andrew")); + } + + [TestMethod] + public void BatchReadAllBins() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Bin bin = new Bin("bin5", "NewValue"); + + BatchResults bresults = client.Operate(null, null, keys, + Operation.Put(bin), + Operation.Get() + ); + + for (int i = 0; i < bresults.records.Length; i++) + { + BatchRecord br = bresults.records[i]; + Assert.AreEqual(0, br.resultCode); + + Record r = br.record; + + string s = r.GetString(bin.name); + Assert.AreEqual("NewValue", s); + + object obj = r.GetValue(BinName); + Assert.IsNotNull(obj); + } + } + + [TestMethod] + public void BatchWriteComplex() + { + Expression wexp1 = Exp.Build(Exp.Add(Exp.IntBin(BinName), Exp.Val(1000))); + + Operation[] wops1 = Operation.Array(Operation.Put(new Bin(BinName2, 100))); + Operation[] wops2 = Operation.Array(ExpOperation.Write(BinName3, 
wexp1, ExpWriteFlags.DEFAULT)); + Operation[] rops1 = Operation.Array(Operation.Get(BinName2)); + Operation[] rops2 = Operation.Array(Operation.Get(BinName3)); + + BatchWritePolicy wp = new BatchWritePolicy(); + wp.sendKey = true; + + BatchWrite bw1 = new BatchWrite(new Key(args.ns, args.set, KeyPrefix + 1), wops1); + BatchWrite bw2 = new BatchWrite(new Key("invalid", args.set, KeyPrefix + 1), wops1); + BatchWrite bw3 = new BatchWrite(wp, new Key(args.ns, args.set, KeyPrefix + 6), wops2); + BatchDelete bd1 = new BatchDelete(new Key(args.ns, args.set, 10002)); + + List records = new List(); + records.Add(bw1); + records.Add(bw2); + records.Add(bw3); + records.Add(bd1); + + bool status = client.Operate(null, records); + + Assert.IsFalse(status); // "invalid" namespace triggers the false status. + Assert.AreEqual(0, bw1.resultCode); + AssertBinEqual(bw1.key, bw1.record, BinName2, 0); + Assert.AreEqual(ResultCode.INVALID_NAMESPACE, bw2.resultCode); + Assert.AreEqual(0, bw3.resultCode); + AssertBinEqual(bw3.key, bw3.record, BinName3, 0); + Assert.AreEqual(ResultCode.OK, bd1.resultCode); + + BatchRead br1 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), rops1); + BatchRead br2 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), rops2); + BatchRead br3 = new BatchRead(new Key(args.ns, args.set, 10002), true); + + records.Clear(); + records.Add(br1); + records.Add(br2); + records.Add(br3); + + status = client.Operate(null, records); + + Assert.IsFalse(status); // Read of deleted record causes status to be false. 
+ AssertBinEqual(br1.key, br1.record, BinName2, 100); + AssertBinEqual(br2.key, br2.record, BinName3, 1006); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br3.resultCode); + } + + [TestMethod] + public void BatchDelete() + { + // Define keys + Key[] keys = new Key[] { new Key(args.ns, args.set, 10000), new Key(args.ns, args.set, 10001) }; + + // Ensure keys exists + bool[] exists = client.Exists(null, keys); + Assert.IsTrue(exists[0]); + Assert.IsTrue(exists[1]); + + // Delete keys + BatchResults br = client.Delete(null, null, keys); + Assert.IsTrue(br.status); + + // Ensure keys do not exist + exists = client.Exists(null, keys); + Assert.IsFalse(exists[0]); + Assert.IsFalse(exists[1]); + } + + [TestMethod] + public void BatchReadTTL() + { + // WARNING: This test takes a long time to run due to sleeps. + // Define keys + Key key1 = new(args.ns, args.set, 88888); + Key key2 = new(args.ns, args.set, 88889); + + // Write keys with ttl. + BatchWritePolicy bwp = new() + { + expiration = 10 + }; + Key[] keys = new Key[] { key1, key2 }; + client.Operate(null, bwp, keys, Operation.Put(new Bin("a", 1))); + + // Read records before they expire and reset read ttl on one record. + Util.Sleep(8000); + BatchReadPolicy brp1 = new() + { + readTouchTtlPercent = 80 + }; + + BatchReadPolicy brp2 = new() + { + readTouchTtlPercent = -1 + }; + + BatchRead br1 = new(brp1, key1, new String[] { "a" }); + BatchRead br2 = new(brp2, key2, new String[] { "a" }); + + List list = new() + { + br1, + br2 + }; + + bool rv = client.Operate(null, list); + + Assert.IsTrue(rv); + Assert.AreEqual(ResultCode.OK, br1.resultCode); + Assert.AreEqual(ResultCode.OK, br2.resultCode); + + // Read records again, but don't reset read ttl. 
+ Util.Sleep(3000); + brp1.readTouchTtlPercent = -1; + brp2.readTouchTtlPercent = -1; + + br1 = new BatchRead(brp1, key1, new String[] { "a" }); + br2 = new BatchRead(brp2, key2, new String[] { "a" }); + + list.Clear(); + list.Add(br1); + list.Add(br2); + + rv = client.Operate(null, list); + + // Key 2 should have expired. + Assert.AreEqual(ResultCode.OK, br1.resultCode); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); + Assert.IsFalse(rv); + + // Read record after it expires, showing it's gone. + Util.Sleep(8000); + rv = client.Operate(null, list); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br1.resultCode); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); + Assert.IsFalse(rv); + } + + + private void AssertBatchBinEqual(List list, string binName, int i) + { + BatchRead batch = list[i]; + AssertBinEqual(batch.key, batch.record, binName, ValuePrefix + (i + 1)); + } + + private void AssertBatchRecordExists(List list, string binName, int i) + { + BatchRead batch = list[i]; + AssertRecordFound(batch.key, batch.record); + Assert.AreNotEqual(0, batch.record.generation); + // ttl can be zero if server default-ttl = 0. + // Assert.AreNotEqual(0, batch.record.expiration); + } + } +} diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs new file mode 100644 index 00000000..b5024183 --- /dev/null +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -0,0 +1,734 @@ +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestTxn : TestSync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = client.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + + [TestMethod] + public void TxnWrite() + { + Key key = new(args.ns, args.set, "mrtkey1"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteTwice() + { + Key key = new(args.ns, args.set, "mrtkey2"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val1")); + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteConflict() + { + Key key = new(args.ns, args.set, "mrtkey021"); + + Txn txn1 = new(); + Txn txn2 = new(); + + WritePolicy wp1 = client.WritePolicyDefault.Clone(); + 
WritePolicy wp2 = client.WritePolicyDefault.Clone(); + wp1.Txn = txn1; + wp2.Txn = txn2; + + client.Put(wp1, key, new Bin(binName, "val1")); + + try + { + client.Put(wp2, key, new Bin(binName, "val2")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_BLOCKED) + { + throw; + } + } + + client.Commit(txn1); + client.Commit(txn2); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnWriteBlock() + { + Key key = new(args.ns, args.set, "mrtkey3"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + try + { + // This write should be blocked. + client.Put(null, key, new Bin(binName, "val3")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.MRT_BLOCKED) + { + throw; + } + } + + client.Commit(txn); + } + + [TestMethod] + public void TxnWriteRead() + { + Key key = new(args.ns, args.set, "mrtkey4"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey5"); + + client.Delete(null, key); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault.Clone(); + p.Txn = txn; + Record record = client.Get(p, key); 
+ AssertBinEqual(key, record, binName, "val2"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + Assert.AreEqual(3, record.generation); + } + + [TestMethod] + public void TxnDelete() + { + Key key = new(args.ns, args.set, "mrtkey6"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TxnDeleteAbort() + { + Key key = new(args.ns, args.set, "mrtkey7"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnDeleteTwice() + { + Key key = new(args.ns, args.set, "mrtkey8"); + + Txn txn = new(); + + client.Put(null, key, new Bin(binName, "val1")); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + client.Delete(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TxnTouch() + { + Key key = new(args.ns, args.set, "mrtkey91"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Touch(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnTouchAbort() + { + Key key = new(args.ns, args.set, "mrtkey10"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy 
wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Touch(wp, key); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnOperateWrite() + { + Key key = new(args.ns, args.set, "mrtkey11"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnOperateWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey12"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnUDF() + { + Key key = new(args.ns, args.set, "mrtkey13"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnUDFAbort() + { + Key key = new(args.ns, args.set, "mrtkey14"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + 
client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnBatch() + { + Key[] keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Commit(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 2); + } + + [TestMethod] + public void TxnBatchAbort() + { + var keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + 
br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Abort(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + } + + [TestMethod] + public void TxnWriteCommitAbort() + { + Key key = new(args.ns, args.set, "mrtkey15"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault.Clone(); + p.Txn = txn; + Record record = client.Get(p, key); + AssertBinEqual(key, record, binName, "val2"); + + client.Commit(txn); + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + + var abortStatus = client.Abort(txn); + Assert.AreEqual(AbortStatus.AbortStatusType.ALREADY_COMMITTED, abortStatus); + } + + [TestMethod] + public void TxnWriteReadTwoTxn() + { + Txn txn1 = new(); + Txn txn2 = new(); + + Key key = new(args.ns, args.set, "mrtkey16"); + + client.Put(null, key, new Bin(binName, "val1")); + + var rp1 = client.ReadPolicyDefault.Clone(); + rp1.Txn = txn1; + var record = client.Get(rp1, key); + AssertBinEqual(key, record, binName, "val1"); + + var rp2 = client.ReadPolicyDefault.Clone(); + rp2.Txn = txn2; + record = client.Get(rp2, key); + AssertBinEqual(key, record, binName, "val1"); + + var status = client.Commit(txn1); + Assert.AreEqual(CommitStatus.CommitStatusType.OK, status); + + status = client.Commit(txn2); + Assert.AreEqual(CommitStatus.CommitStatusType.OK, status); + } + + [TestMethod] + public void TxnLUTCommit() // Test Case 38 + { + Txn txn = new(); // T0 + + Key key1 = new(args.ns, args.set, "mrtkey17"); + Key key2 = new(args.ns, args.set, "mrtkey18"); + Key key3 = new(args.ns, args.set, "mrtkey19"); + + client.Delete(null, key1); + client.Delete(null, key2); + client.Delete(null, key3); + + var wp = 
client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key1, new Bin(binName, "val1")); // T1 + + var p = client.ReadPolicyDefault.Clone(); + p.Txn = txn; + var record = client.Get(p, key1); // T2 + Assert.AreEqual(1, record.generation); + + client.Put(wp, key1, new Bin(binName, "val11")); // T3 + + record = client.Get(p, key1); // T4 + Assert.AreEqual(2, record.generation); + + client.Put(null, key2, new Bin(binName, "val1")); // T5 + + record = client.Get(p, key2); // T6 + Assert.AreEqual(1, record.generation); + + client.Put(wp, key2, new Bin(binName, "val11")); // T7 + + record = client.Get(p, key2); // T8 + Assert.AreEqual(2, record.generation); + + client.Put(wp, key3, new Bin(binName, "val1")); // T9 + + record = client.Get(p, key3); // T10 + Assert.AreEqual(1, record.generation); + + client.Commit(txn); // T11 + + record = client.Get(null, key1); // T12 + Assert.AreEqual(3, record.generation); + record = client.Get(null, key2); + Assert.AreEqual(3, record.generation); + record = client.Get(null, key3); + Assert.AreEqual(2, record.generation); + } + + [TestMethod] + public void TxnLUTAbort() // Test Case 39 + { + client.Truncate(null, args.ns, args.set, DateTime.Now); + + Txn txn = new(); // T0 + + Key key1 = new(args.ns, args.set, "mrtkey20"); + Key key2 = new(args.ns, args.set, "mrtkey21"); + Key key3 = new(args.ns, args.set, "mrtkey22"); + + //client.Delete(null, key1); + //client.Delete(null, key2); + //client.Delete(null, key3); + + client.Put(null, key1, new Bin(binName, "val1")); // T1 + + var p = client.ReadPolicyDefault.Clone(); + p.Txn = txn; + var record = client.Get(p, key1); // T2 + Assert.AreEqual(1, record.generation); + + var binR2O = new Bin(binName, "val2"); + client.Put(null, key2, binR2O); // T3 + record = client.Get(p, key2); // T4 + Assert.AreEqual(1, record.generation); + + var wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key2, new Bin(binName, "val11")); // T5 + + record = client.Get(p, key2); 
+ Assert.AreEqual(2, record.generation); + + record = client.Get(null, key2); // T6 + Assert.AreEqual(1, record.generation); + + client.Put(wp, key3, new Bin(binName, "val3")); // T7 + record = client.Get(p, key3); + Assert.AreEqual(1, record.generation); + + var binR1UO = new Bin(binName, "val1"); // T8 + client.Put(null, key1, binR1UO); + record = client.Get(null, key1); + Assert.AreEqual(2, record.generation); + + try + { + client.Put(wp, key1, new Bin(binName, "val1111")); // T9 + record = client.Get(p, key1); + Assert.AreEqual(2, record.generation); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_VERSION_MISMATCH) + { + throw; + } + } + + try + { + client.Commit(txn); // T10 + } + catch (AerospikeException.Commit) + { + + } + + record = client.Get(null, key1); // T11 + Assert.AreEqual(2, record.generation); + AssertBinEqual(key1, record, binR1UO); + record = client.Get(null, key2); + Assert.AreEqual(3, record.generation); + AssertBinEqual(key2, record, binR2O); + record = client.Get(null, key3); + Assert.IsNull(record); + + // Cleanup + client.Abort(txn); + } + + [TestMethod] + public void TxnWriteAfterCommit() + { + Key key = new(args.ns, args.set, "mrtkey23"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val1")); + + client.Commit(txn); + + try + { + client.Put(wp, key, new Bin(binName, "val1")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException ae) + { + if (!ae.Message.Contains("Command not allowed in current MRT state:")) + { + throw; + } + } + + } + + [TestMethod] + public void TxnInvalidNamespace() + { + Key key = new("invalid", args.set, "mrtkey"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault.Clone(); + wp.Txn = txn; + + try + { + client.Put(wp, key, new Bin(binName, "val1")); + client.Commit(txn); + throw new 
AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.INVALID_NAMESPACE) + { + throw; + } + } + } + + private static void AssertBatchEqual(Key[] keys, Record[] recs, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + Assert.IsNotNull(rec); + + int received = rec.GetInt(binName); + Assert.AreEqual(expected, received); + } + } + } +} diff --git a/AerospikeTest/settings.json b/AerospikeTest/settings.json index 8ec89468..058b2f24 100644 --- a/AerospikeTest/settings.json +++ b/AerospikeTest/settings.json @@ -1,17 +1,18 @@ -{ - "Host": "localhost", - "Port": 3000, - "ClusterName": "", - "Namespace": "test", - "Set": "test", - "User": "", - "Password": "", - "Timeout": 25000, - "TlsEnable": false, - "TlsName": "", - "TlsProtocols": "", - "TlsRevoke": "", - "TlsClientCertFile": "", - "TlsLoginOnly": false, - "AuthMode": "INTERNAL" -} +{ + "Host": "localhost", + "Port": 3000, + "ClusterName": "", + "Namespace": "test", + "Set": "test", + "User": "", + "Password": "", + "Timeout": 25000, + "UseServicesAlternate": false, + "TlsEnable": false, + "TlsName": "", + "TlsProtocols": "", + "TlsRevoke": "", + "TlsClientCertFile": "", + "TlsLoginOnly": false, + "AuthMode": "INTERNAL" +}